From 8f47e76465a6f84206e0d6daf4a13aff0bb1f83b Mon Sep 17 00:00:00 2001 From: duongna21 Date: Wed, 3 Aug 2022 22:18:57 +0700 Subject: [PATCH 001/539] Compute true loss --- .../run_image_captioning_flax.py | 34 +++++++++++-------- examples/flax/language-modeling/README.md | 2 +- .../language-modeling/run_bart_dlm_flax.py | 31 ++++++++++------- .../flax/language-modeling/run_mlm_flax.py | 29 +++++++++------- .../summarization/run_summarization_flax.py | 34 +++++++++++-------- 5 files changed, 76 insertions(+), 54 deletions(-) diff --git a/examples/flax/image-captioning/run_image_captioning_flax.py b/examples/flax/image-captioning/run_image_captioning_flax.py index 4552defb8efc45..db5b14e142042d 100644 --- a/examples/flax/image-captioning/run_image_captioning_flax.py +++ b/examples/flax/image-captioning/run_image_captioning_flax.py @@ -926,8 +926,9 @@ def loss_fn(logits, labels, padding_mask, label_smoothing_factor=0.0): # ignore padded tokens from loss loss = loss * padding_mask - loss = loss.sum() / padding_mask.sum() - return loss + loss = loss.sum() + num_labels = padding_mask.sum() + return loss, num_labels # Define gradient update step fn def train_step(state, batch, label_smoothing_factor=0.0): @@ -936,17 +937,21 @@ def train_step(state, batch, label_smoothing_factor=0.0): def compute_loss(params): labels = batch.pop("labels") logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] - loss = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor) - return loss - - grad_fn = jax.value_and_grad(compute_loss) - loss, grad = grad_fn(state.params) - grad = jax.lax.pmean(grad, "batch") - + loss, num_labels = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor) + return loss, num_labels + + grad_fn = jax.value_and_grad(compute_loss, has_aux=True) + (loss, num_labels), grad = grad_fn(state.params) + num_labels = jax.lax.psum(num_labels, "batch") + # true loss = total loss / total samples + loss = jax.lax.psum(loss, "batch") + loss = jax.tree_map(lambda x: x / num_labels, loss) + # true grad = total grad / total samples + grad = jax.lax.psum(grad, "batch") + grad = jax.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} - metrics = jax.lax.pmean(metrics, axis_name="batch") return new_state, metrics @@ -954,11 +959,12 @@ def compute_loss(params): def eval_step(params, batch, label_smoothing_factor=0.0): labels = batch.pop("labels") logits = model(**batch, params=params, train=False)[0] - loss = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor) - + loss, num_labels = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor) + # true loss = total loss / total samples + loss = jax.lax.psum(loss, "batch") + loss = jax.tree_map(lambda x: x / num_labels, loss) # summarize metrics metrics = {"loss": loss} - metrics = jax.lax.pmean(metrics, axis_name="batch") return metrics # Define generation function @@ -1253,4 +1259,4 @@ def predict(rng: jax.random.PRNGKey, dataset: Dataset): if __name__ == "__main__": - main() + main() \ No newline at end of file diff --git a/examples/flax/language-modeling/README.md b/examples/flax/language-modeling/README.md index 5b83ed06545946..75fd64eb0856a5 100644 --- a/examples/flax/language-modeling/README.md +++ b/examples/flax/language-modeling/README.md @@ -351,7 +351,7 @@ The example script uses the 🤗 Datasets library. You can easily customize them To setup all relevant files for training, let's create a directory. ```bash -mkdir ./norwegian-roberta-base +mkdir ./norwegian-bart-base ``` ### Train tokenizer diff --git a/examples/flax/language-modeling/run_bart_dlm_flax.py b/examples/flax/language-modeling/run_bart_dlm_flax.py index 5c8bf1bbc45dda..1d962eb0dd00f8 100644 --- a/examples/flax/language-modeling/run_bart_dlm_flax.py +++ b/examples/flax/language-modeling/run_bart_dlm_flax.py @@ -799,18 +799,23 @@ def loss_fn(params): loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask # take average - loss = loss.sum() / label_mask.sum() - - return loss - - grad_fn = jax.value_and_grad(loss_fn) - loss, grad = grad_fn(state.params) - grad = jax.lax.pmean(grad, "batch") + loss = loss.sum() + num_labels = label_mask.sum() + + return loss, num_labels + + grad_fn = jax.value_and_grad(loss_fn, has_aux=True) + (loss, num_labels), grad = grad_fn(state.params) + num_labels = jax.lax.psum(num_labels, "batch") + # true loss = total loss / total samples + loss = jax.lax.psum(loss, "batch") + loss = jax.tree_map(lambda x: x / num_labels, loss) + # true grad = total grad / total samples + grad = jax.lax.psum(grad, "batch") + grad = jax.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad) - metrics = jax.lax.pmean( - {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}, axis_name="batch" - ) + metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} return new_state, metrics, new_dropout_rng @@ -888,7 +893,7 @@ def eval_step(params, batch): num_eval_samples = len(tokenized_datasets["validation"]) # Avoid using jax.numpy here in case of TPU training eval_samples_idx = np.arange(num_eval_samples) - eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size, drop_last=False) + eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size) eval_metrics = [] for i, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)): @@ -928,7 +933,7 @@ def eval_step(params, batch): num_eval_samples = len(tokenized_datasets["validation"]) # Avoid using jax.numpy here in case of TPU training eval_samples_idx = np.arange(num_eval_samples) - eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size, drop_last=False) + eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size) eval_metrics = [] for _, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)): @@ -961,4 +966,4 @@ def eval_step(params, batch): if __name__ == "__main__": - main() + main() \ No newline at end of file diff --git a/examples/flax/language-modeling/run_mlm_flax.py b/examples/flax/language-modeling/run_mlm_flax.py index f3f3c324ecfea6..820c0f2087b17d 100755 --- a/examples/flax/language-modeling/run_mlm_flax.py +++ b/examples/flax/language-modeling/run_mlm_flax.py @@ -709,23 +709,28 @@ def loss_fn(params): logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] - # compute loss, ignore padded input tokens + # compute loss, ignore padded input tokens and special tokens label_mask = jnp.where(labels > 0, 1.0, 0.0) loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask # take average - loss = loss.sum() / label_mask.sum() - - return loss - - grad_fn = jax.value_and_grad(loss_fn) - loss, grad = grad_fn(state.params) - grad = jax.lax.pmean(grad, "batch") + loss = loss.sum() + num_labels = label_mask.sum() + + return loss, num_labels + + grad_fn = jax.value_and_grad(loss_fn, has_aux=True) + (loss, num_labels), grad = grad_fn(state.params) + num_labels = jax.lax.psum(num_labels, "batch") + # true loss = total loss / total samples + loss = jax.lax.psum(loss, "batch") + loss = jax.tree_map(lambda x: x / num_labels, loss) + # true grad = total grad / total samples + grad = jax.lax.psum(grad, "batch") + grad = jax.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad) - metrics = jax.lax.pmean( - {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}, axis_name="batch" - ) + metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} return new_state, metrics, new_dropout_rng @@ -876,4 +881,4 @@ def eval_step(params, batch): if __name__ == "__main__": - main() + main() \ No newline at end of file diff --git a/examples/flax/summarization/run_summarization_flax.py b/examples/flax/summarization/run_summarization_flax.py index 856fd6fdb7b36a..2bd9c44a163c6f 100644 --- a/examples/flax/summarization/run_summarization_flax.py +++ b/examples/flax/summarization/run_summarization_flax.py @@ -775,8 +775,9 @@ def loss_fn(logits, labels, padding_mask, label_smoothing_factor=0.0): # ignore padded tokens from loss loss = loss * padding_mask - loss = loss.sum() / padding_mask.sum() - return loss + loss = loss.sum() + num_labels = padding_mask.sum() + return loss, num_labels # Define gradient update step fn def train_step(state, batch, label_smoothing_factor=0.0): @@ -785,17 +786,21 @@ def train_step(state, batch, label_smoothing_factor=0.0): def compute_loss(params): labels = batch.pop("labels") logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] - loss = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor) - return loss - - grad_fn = jax.value_and_grad(compute_loss) - loss, grad = grad_fn(state.params) - grad = jax.lax.pmean(grad, "batch") - + loss, num_labels = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor) + return loss, num_labels + + grad_fn = jax.value_and_grad(compute_loss, has_aux=True) + (loss, num_labels), grad = grad_fn(state.params) + num_labels = jax.lax.psum(num_labels, "batch") + # true loss = total loss / total samples + loss = jax.lax.psum(loss, "batch") + loss = jax.tree_map(lambda x: x / num_labels, loss) + # true grad = total grad / total samples + grad = jax.lax.psum(grad, "batch") + grad = jax.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} - metrics = jax.lax.pmean(metrics, axis_name="batch") return new_state, metrics @@ -803,11 +808,12 @@ def compute_loss(params): def eval_step(params, batch, label_smoothing_factor=0.0): labels = batch.pop("labels") logits = model(**batch, params=params, train=False)[0] - loss = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor) - + loss, num_labels = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor) + # true loss = total loss / total samples + loss = jax.lax.psum(loss, "batch") + loss = jax.tree_map(lambda x: x / num_labels, loss) # summarize metrics metrics = {"loss": loss} - metrics = jax.lax.pmean(metrics, axis_name="batch") return metrics # Define generation function @@ -970,4 +976,4 @@ def generate_step(params, batch): if __name__ == "__main__": - main() + main() \ No newline at end of file From 4863e61a9b6b69dbc397a56b3d6ce080eb0757f0 Mon Sep 17 00:00:00 2001 From: duongna21 Date: Wed, 3 Aug 2022 15:24:50 +0000 Subject: [PATCH 002/539] fixup --- .../image-captioning/run_image_captioning_flax.py | 13 +------------ .../flax/language-modeling/run_bart_dlm_flax.py | 2 +- examples/flax/language-modeling/run_mlm_flax.py | 2 +- examples/flax/language-modeling/run_t5_mlm_flax.py | 2 -- .../flax/summarization/run_summarization_flax.py | 2 +- 5 files changed, 4 insertions(+), 17 deletions(-) diff --git a/examples/flax/image-captioning/run_image_captioning_flax.py b/examples/flax/image-captioning/run_image_captioning_flax.py index db5b14e142042d..a6968d9725c107 100644 --- a/examples/flax/image-captioning/run_image_captioning_flax.py +++ b/examples/flax/image-captioning/run_image_captioning_flax.py @@ -335,7 +335,6 @@ def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuf batch_idx = np.arange(len(dataset)) for idx in range(steps): - start_idx = batch_size * idx end_idx = batch_size * (idx + 1) @@ -347,7 +346,6 @@ def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuf def write_metric(summary_writer, metrics, train_time, step, metric_key_prefix="train"): - if train_time: summary_writer.scalar("train_time", train_time, step) @@ -782,11 +780,9 @@ def blockwise_data_loader( num_splits = steps // steps_per_block + int(steps % steps_per_block > 0) for idx in range(num_splits): - if not block_size: _ds = ds else: - start_idx = block_size * idx end_idx = block_size * (idx + 1) @@ -1030,7 +1026,6 @@ def evaluation_loop( ckpt_dir: str = "", is_prediction=False, ): - logger.info(f"*** {'Predict' if is_prediction else 'Evaluate'} ***") metrics = [] @@ -1109,12 +1104,10 @@ def evaluation_loop( logger.info(desc) if jax.process_index() == 0: - if not os.path.isdir(os.path.join(training_args.output_dir, ckpt_dir)): os.makedirs(os.path.join(training_args.output_dir, ckpt_dir), exist_ok=True) if metrics: - # Save metrics (only for the evaluation/prediction being done along with training) if has_tensorboard and training_args.do_train: write_metric( @@ -1149,7 +1142,6 @@ def predict(rng: jax.random.PRNGKey, dataset: Dataset): input_rng = None if training_args.do_train: - cur_step = 0 train_time = 0 epochs = tqdm(range(num_epochs), desc=f"Epoch ... (1/{num_epochs})", position=0) @@ -1172,7 +1164,6 @@ def predict(rng: jax.random.PRNGKey, dataset: Dataset): # train for batch_idx, _ in enumerate(tqdm(range(steps_per_epoch), desc="Training...", position=1, leave=False)): - cur_step += 1 batch = next(train_batches) batch_start = time.time() @@ -1183,7 +1174,6 @@ def predict(rng: jax.random.PRNGKey, dataset: Dataset): # log and save info if training_args.logging_steps > 0 and cur_step % training_args.logging_steps == 0: - _train_metric = unreplicate(train_metric) desc = ( f"Epoch... ({epoch + 1}/{num_epochs} | Step: {cur_step} | Loss: {_train_metric['loss']} |" @@ -1223,7 +1213,6 @@ def predict(rng: jax.random.PRNGKey, dataset: Dataset): # log and save info if training_args.logging_steps <= 0: - logger.info(desc) with open(os.path.join(training_args.output_dir, "log"), "a", encoding="UTF-8") as fp: @@ -1259,4 +1248,4 @@ def predict(rng: jax.random.PRNGKey, dataset: Dataset): if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/examples/flax/language-modeling/run_bart_dlm_flax.py b/examples/flax/language-modeling/run_bart_dlm_flax.py index 1d962eb0dd00f8..204395a2c78fc8 100644 --- a/examples/flax/language-modeling/run_bart_dlm_flax.py +++ b/examples/flax/language-modeling/run_bart_dlm_flax.py @@ -966,4 +966,4 @@ def eval_step(params, batch): if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/examples/flax/language-modeling/run_mlm_flax.py b/examples/flax/language-modeling/run_mlm_flax.py index 820c0f2087b17d..132b34c8fbc544 100755 --- a/examples/flax/language-modeling/run_mlm_flax.py +++ b/examples/flax/language-modeling/run_mlm_flax.py @@ -881,4 +881,4 @@ def eval_step(params, batch): if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/examples/flax/language-modeling/run_t5_mlm_flax.py b/examples/flax/language-modeling/run_t5_mlm_flax.py index a2906c410879b9..e174f0b38d0ed5 100755 --- a/examples/flax/language-modeling/run_t5_mlm_flax.py +++ b/examples/flax/language-modeling/run_t5_mlm_flax.py @@ -328,7 +328,6 @@ class FlaxDataCollatorForT5MLM: decoder_start_token_id: int def __call__(self, examples: List[Dict[str, np.ndarray]]) -> BatchEncoding: - # convert list to dict and tensorize input batch = BatchEncoding( {k: np.array([examples[i][k] for i in range(len(examples))]) for k, v in examples[0].items()} @@ -397,7 +396,6 @@ def filter_input_ids(self, input_ids, sentinel_ids): return input_ids def random_spans_noise_mask(self, length): - """This function is copy of `random_spans_helper `__ . Noise mask consisting of random spans of noise tokens. diff --git a/examples/flax/summarization/run_summarization_flax.py b/examples/flax/summarization/run_summarization_flax.py index 2bd9c44a163c6f..c909a5abe85f9b 100644 --- a/examples/flax/summarization/run_summarization_flax.py +++ b/examples/flax/summarization/run_summarization_flax.py @@ -976,4 +976,4 @@ def generate_step(params, batch): if __name__ == "__main__": - main() \ No newline at end of file + main() From b4a7282e040d5ccc0f1013cfd494b34d21752a1d Mon Sep 17 00:00:00 2001 From: duongna21 Date: Wed, 3 Aug 2022 22:28:59 +0700 Subject: [PATCH 003/539] final --- .../flax/image-captioning/run_image_captioning_flax.py | 8 ++++++-- examples/flax/summarization/run_summarization_flax.py | 8 ++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/examples/flax/image-captioning/run_image_captioning_flax.py b/examples/flax/image-captioning/run_image_captioning_flax.py index db5b14e142042d..d0a362290bbf35 100644 --- a/examples/flax/image-captioning/run_image_captioning_flax.py +++ b/examples/flax/image-captioning/run_image_captioning_flax.py @@ -943,27 +943,31 @@ def compute_loss(params): grad_fn = jax.value_and_grad(compute_loss, has_aux=True) (loss, num_labels), grad = grad_fn(state.params) num_labels = jax.lax.psum(num_labels, "batch") + # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") loss = jax.tree_map(lambda x: x / num_labels, loss) + # true grad = total grad / total samples grad = jax.lax.psum(grad, "batch") grad = jax.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} - return new_state, metrics # Define eval fn def eval_step(params, batch, label_smoothing_factor=0.0): labels = batch.pop("labels") logits = model(**batch, params=params, train=False)[0] + loss, num_labels = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor) + num_labels = jax.lax.psum(num_labels, "batch") + # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") loss = jax.tree_map(lambda x: x / num_labels, loss) - # summarize metrics + metrics = {"loss": loss} return metrics diff --git a/examples/flax/summarization/run_summarization_flax.py b/examples/flax/summarization/run_summarization_flax.py index 2bd9c44a163c6f..5faf9803bb8f44 100644 --- a/examples/flax/summarization/run_summarization_flax.py +++ b/examples/flax/summarization/run_summarization_flax.py @@ -792,27 +792,31 @@ def compute_loss(params): grad_fn = jax.value_and_grad(compute_loss, has_aux=True) (loss, num_labels), grad = grad_fn(state.params) num_labels = jax.lax.psum(num_labels, "batch") + # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") loss = jax.tree_map(lambda x: x / num_labels, loss) + # true grad = total grad / total samples grad = jax.lax.psum(grad, "batch") grad = jax.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} - return new_state, metrics # Define eval fn def eval_step(params, batch, label_smoothing_factor=0.0): labels = batch.pop("labels") logits = model(**batch, params=params, train=False)[0] + loss, num_labels = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor) + num_labels = jax.lax.psum(num_labels, "batch") + # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") loss = jax.tree_map(lambda x: x / num_labels, loss) - # summarize metrics + metrics = {"loss": loss} return metrics From cbd0a6fd6c49bdc19da35723f0ea60f489b4ff20 Mon Sep 17 00:00:00 2001 From: duongna21 Date: Wed, 3 Aug 2022 22:31:00 +0700 Subject: [PATCH 004/539] final --- examples/flax/language-modeling/run_bart_dlm_flax.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/flax/language-modeling/run_bart_dlm_flax.py b/examples/flax/language-modeling/run_bart_dlm_flax.py index 204395a2c78fc8..e6e4b8f0d0cd11 100644 --- a/examples/flax/language-modeling/run_bart_dlm_flax.py +++ b/examples/flax/language-modeling/run_bart_dlm_flax.py @@ -807,16 +807,17 @@ def loss_fn(params): grad_fn = jax.value_and_grad(loss_fn, has_aux=True) (loss, num_labels), grad = grad_fn(state.params) num_labels = jax.lax.psum(num_labels, "batch") + # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") loss = jax.tree_map(lambda x: x / num_labels, loss) + # true grad = total grad / total samples grad = jax.lax.psum(grad, "batch") grad = jax.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} - return new_state, metrics, new_dropout_rng # Create parallel version of the train step From 1c6bcf7f98507bc4a41d419deac9a0c3ed6f1a5c Mon Sep 17 00:00:00 2001 From: duongna21 Date: Wed, 3 Aug 2022 22:32:43 +0700 Subject: [PATCH 005/539] final --- examples/flax/language-modeling/run_mlm_flax.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/examples/flax/language-modeling/run_mlm_flax.py b/examples/flax/language-modeling/run_mlm_flax.py index 132b34c8fbc544..caf3f5f8190ee6 100755 --- a/examples/flax/language-modeling/run_mlm_flax.py +++ b/examples/flax/language-modeling/run_mlm_flax.py @@ -709,7 +709,7 @@ def loss_fn(params): logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] - # compute loss, ignore padded input tokens and special tokens + # compute loss, ignore padded input tokens label_mask = jnp.where(labels > 0, 1.0, 0.0) loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask @@ -722,9 +722,11 @@ def loss_fn(params): grad_fn = jax.value_and_grad(loss_fn, has_aux=True) (loss, num_labels), grad = grad_fn(state.params) num_labels = jax.lax.psum(num_labels, "batch") + # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") loss = jax.tree_map(lambda x: x / num_labels, loss) + # true grad = total grad / total samples grad = jax.lax.psum(grad, "batch") grad = jax.tree_map(lambda x: x / num_labels, grad) From be41eaf55feab433d5454c14127e642d85693dc4 Mon Sep 17 00:00:00 2001 From: Sayak Paul Date: Wed, 3 Aug 2022 21:09:54 +0530 Subject: [PATCH 006/539] fix: keras fit tests for segformer tf and minor refactors. (#18412) * fix: keras fit tests for segformer tf and minor refactors. * refactor: test_keras_fit to make it simpler using the existing one. * fix: styling issues. --- .../segformer/test_modeling_tf_segformer.py | 62 ++++--------------- 1 file changed, 11 insertions(+), 51 deletions(-) diff --git a/tests/models/segformer/test_modeling_tf_segformer.py b/tests/models/segformer/test_modeling_tf_segformer.py index fbf38fa1d6b2bb..6cc2c77fe935fc 100644 --- a/tests/models/segformer/test_modeling_tf_segformer.py +++ b/tests/models/segformer/test_modeling_tf_segformer.py @@ -18,8 +18,6 @@ import unittest from typing import List, Tuple -import numpy as np - from transformers import SegformerConfig from transformers.file_utils import is_tf_available, is_vision_available from transformers.testing_utils import require_tf, slow @@ -331,64 +329,26 @@ def recursive_check(tuple_object, dict_object): # todo: incorporate label support for semantic segmentation in `test_modeling_tf_common.py`. + @unittest.skipIf( + not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, + reason="TF (<=2.8) does not support backprop for grouped convolutions on CPU.", + ) def test_dataset_conversion(self): - gpus = tf.config.list_physical_devices("GPU") - # Grouped convs aren't supported on CPUs for backprop. - if len(gpus) >= 1: - super().test_dataset_conversion() + super().test_dataset_conversion() + @unittest.skipIf( + not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, + reason="TF (<=2.8) does not support backprop for grouped convolutions on CPU.", + ) def test_keras_fit(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() - gpus = tf.config.list_physical_devices("GPU") - - def apply(model): - if getattr(model, "hf_compute_loss", None): - model_weights = model.get_weights() - - # Test that model correctly compute the loss with kwargs - for_segmentation = True if model_class.__name__ == "TFSegformerForSemanticSegmentation" else False - _, prepared_for_class = self.model_tester.prepare_config_and_inputs_for_keras_fit( - for_segmentation=for_segmentation - ) - - label_names = {"labels"} - self.assertGreater(len(label_names), 0, msg="No matching label names found!") - labels = {key: val for key, val in prepared_for_class.items() if key in label_names} - inputs_minus_labels = {key: val for key, val in prepared_for_class.items() if key not in label_names} - self.assertGreater(len(inputs_minus_labels), 0) - model.compile(optimizer=tf.keras.optimizers.SGD(0.0), run_eagerly=True) - - # Make sure the model fits without crashing regardless of where we pass the labels - history1 = model.fit( - prepared_for_class, - validation_data=prepared_for_class, - steps_per_epoch=1, - validation_steps=1, - shuffle=False, - ) - val_loss1 = history1.history["val_loss"][0] - - # We reinitialize the model here even though our learning rate was zero - # because BatchNorm updates weights by means other than gradient descent. - model.set_weights(model_weights) - history2 = model.fit( - inputs_minus_labels, - labels, - validation_data=(inputs_minus_labels, labels), - steps_per_epoch=1, - validation_steps=1, - shuffle=False, - ) - val_loss2 = history2.history["val_loss"][0] - self.assertTrue(np.allclose(val_loss1, val_loss2, atol=1e-2, rtol=1e-3)) for model_class in self.all_model_classes: # Since `TFSegformerModel` cannot operate with the default `fit()` method. if model_class.__name__ != "TFSegformerModel": - # Grouped convs and backprop with them isn't supported on CPUs. model = model_class(config) - if len(gpus) > 1: - apply(model) + if getattr(model, "hf_compute_loss", None): + super().test_keras_fit() def test_loss_computation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() From 02b176c4ce14340d26d42825523f406959c6c202 Mon Sep 17 00:00:00 2001 From: LSinev Date: Wed, 3 Aug 2022 20:37:18 +0300 Subject: [PATCH 007/539] Fix torch version comparisons (#18460) Comparisons like version.parse(torch.__version__) > version.parse("1.6") are True for torch==1.6.0+cu101 or torch==1.6.0+cpu version.parse(version.parse(torch.__version__).base_version) are preferred (and available in pytorch_utils.py --- examples/research_projects/wav2vec2/run_asr.py | 2 +- .../wav2vec2/run_common_voice.py | 2 +- .../research_projects/wav2vec2/run_pretrain.py | 2 +- src/transformers/activations.py | 6 +++--- src/transformers/convert_graph_to_onnx.py | 4 +++- .../models/albert/modeling_albert.py | 10 +++++++--- src/transformers/models/bert/modeling_bert.py | 10 +++++++--- .../models/big_bird/modeling_big_bird.py | 5 ++--- .../models/convbert/modeling_convbert.py | 10 +++++++--- .../models/data2vec/modeling_data2vec_text.py | 10 +++++++--- .../modeling_decision_transformer.py | 10 +++++++--- .../models/distilbert/modeling_distilbert.py | 10 +++++++--- .../models/electra/modeling_electra.py | 10 +++++++--- .../models/flaubert/modeling_flaubert.py | 4 ++-- src/transformers/models/flava/modeling_flava.py | 4 ++-- src/transformers/models/fnet/modeling_fnet.py | 5 ++--- src/transformers/models/gpt2/modeling_gpt2.py | 11 ++++++++--- .../models/imagegpt/modeling_imagegpt.py | 11 ++++++++--- src/transformers/models/mctct/modeling_mctct.py | 4 ++-- src/transformers/models/nezha/modeling_nezha.py | 10 +++++++--- .../nystromformer/modeling_nystromformer.py | 10 +++++++--- .../models/qdqbert/modeling_qdqbert.py | 5 ++--- src/transformers/models/realm/modeling_realm.py | 10 +++++++--- .../models/roberta/modeling_roberta.py | 10 +++++++--- src/transformers/models/vilt/modeling_vilt.py | 12 ++++++++---- .../xlm_roberta_xl/modeling_xlm_roberta_xl.py | 10 +++++++--- src/transformers/models/yoso/modeling_yoso.py | 10 +++++++--- src/transformers/onnx/convert.py | 3 ++- src/transformers/pipelines/base.py | 4 +++- src/transformers/pytorch_utils.py | 8 ++++++-- src/transformers/trainer.py | 17 +++++++++++------ src/transformers/trainer_pt_utils.py | 2 +- src/transformers/utils/import_utils.py | 6 +++--- ...ling_{{cookiecutter.lowercase_modelname}}.py | 4 ++-- 34 files changed, 164 insertions(+), 87 deletions(-) diff --git a/examples/research_projects/wav2vec2/run_asr.py b/examples/research_projects/wav2vec2/run_asr.py index ab9db11d2a02fa..692aa39796a769 100755 --- a/examples/research_projects/wav2vec2/run_asr.py +++ b/examples/research_projects/wav2vec2/run_asr.py @@ -30,7 +30,7 @@ if is_apex_available(): from apex import amp -if version.parse(torch.__version__) >= version.parse("1.6"): +if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"): _is_native_amp_available = True from torch.cuda.amp import autocast diff --git a/examples/research_projects/wav2vec2/run_common_voice.py b/examples/research_projects/wav2vec2/run_common_voice.py index 10a3a77fa75873..01a877a8092ecf 100644 --- a/examples/research_projects/wav2vec2/run_common_voice.py +++ b/examples/research_projects/wav2vec2/run_common_voice.py @@ -33,7 +33,7 @@ from apex import amp -if version.parse(torch.__version__) >= version.parse("1.6"): +if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"): _is_native_amp_available = True from torch.cuda.amp import autocast diff --git a/examples/research_projects/wav2vec2/run_pretrain.py b/examples/research_projects/wav2vec2/run_pretrain.py index fb430d14074836..8e0801429e61ec 100755 --- a/examples/research_projects/wav2vec2/run_pretrain.py +++ b/examples/research_projects/wav2vec2/run_pretrain.py @@ -26,7 +26,7 @@ if is_apex_available(): from apex import amp -if version.parse(torch.__version__) >= version.parse("1.6"): +if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"): _is_native_amp_available = True from torch.cuda.amp import autocast diff --git a/src/transformers/activations.py b/src/transformers/activations.py index fad8d106134766..5d413bba728b7b 100644 --- a/src/transformers/activations.py +++ b/src/transformers/activations.py @@ -44,7 +44,7 @@ class GELUActivation(nn.Module): def __init__(self, use_gelu_python: bool = False): super().__init__() - if version.parse(torch.__version__) < version.parse("1.4") or use_gelu_python: + if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.4") or use_gelu_python: self.act = self._gelu_python else: self.act = nn.functional.gelu @@ -110,7 +110,7 @@ class SiLUActivation(nn.Module): def __init__(self): super().__init__() - if version.parse(torch.__version__) < version.parse("1.7"): + if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.7"): self.act = self._silu_python else: self.act = nn.functional.silu @@ -130,7 +130,7 @@ class MishActivation(nn.Module): def __init__(self): super().__init__() - if version.parse(torch.__version__) < version.parse("1.9"): + if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.9"): self.act = self._mish_python else: self.act = nn.functional.mish diff --git a/src/transformers/convert_graph_to_onnx.py b/src/transformers/convert_graph_to_onnx.py index c757fab8ff5a90..59fb8ed39b01c1 100644 --- a/src/transformers/convert_graph_to_onnx.py +++ b/src/transformers/convert_graph_to_onnx.py @@ -273,6 +273,8 @@ def convert_pytorch(nlp: Pipeline, opset: int, output: Path, use_external_format import torch from torch.onnx import export + from .pytorch_utils import is_torch_less_than_1_11 + print(f"Using framework PyTorch: {torch.__version__}") with torch.no_grad(): @@ -281,7 +283,7 @@ def convert_pytorch(nlp: Pipeline, opset: int, output: Path, use_external_format # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility - if parse(torch.__version__) <= parse("1.10.99"): + if is_torch_less_than_1_11: export( nlp.model, model_args, diff --git a/src/transformers/models/albert/modeling_albert.py b/src/transformers/models/albert/modeling_albert.py index 169f1faeb8baa2..78df7911a2a0c4 100755 --- a/src/transformers/models/albert/modeling_albert.py +++ b/src/transformers/models/albert/modeling_albert.py @@ -20,7 +20,6 @@ from typing import Dict, List, Optional, Tuple, Union import torch -from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @@ -35,7 +34,12 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from ...pytorch_utils import ( + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + is_torch_greater_than_1_6, + prune_linear_layer, +) from ...utils import ( ModelOutput, add_code_sample_docstrings, @@ -212,7 +216,7 @@ def __init__(self, config: AlbertConfig): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") - if version.parse(torch.__version__) > version.parse("1.6.0"): + if is_torch_greater_than_1_6: self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), diff --git a/src/transformers/models/bert/modeling_bert.py b/src/transformers/models/bert/modeling_bert.py index c1ef87551b3298..495bbe2e49a9cb 100755 --- a/src/transformers/models/bert/modeling_bert.py +++ b/src/transformers/models/bert/modeling_bert.py @@ -24,7 +24,6 @@ import torch import torch.utils.checkpoint -from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @@ -41,7 +40,12 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from ...pytorch_utils import ( + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + is_torch_greater_than_1_6, + prune_linear_layer, +) from ...utils import ( ModelOutput, add_code_sample_docstrings, @@ -195,7 +199,7 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - if version.parse(torch.__version__) > version.parse("1.6.0"): + if is_torch_greater_than_1_6: self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), diff --git a/src/transformers/models/big_bird/modeling_big_bird.py b/src/transformers/models/big_bird/modeling_big_bird.py index 06bc9251d79e89..fb30671927f469 100755 --- a/src/transformers/models/big_bird/modeling_big_bird.py +++ b/src/transformers/models/big_bird/modeling_big_bird.py @@ -23,7 +23,6 @@ import numpy as np import torch import torch.utils.checkpoint -from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @@ -38,7 +37,7 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import apply_chunking_to_forward +from ...pytorch_utils import apply_chunking_to_forward, is_torch_greater_than_1_6 from ...utils import ( ModelOutput, add_code_sample_docstrings, @@ -260,7 +259,7 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - if version.parse(torch.__version__) > version.parse("1.6.0"): + if is_torch_greater_than_1_6: self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), diff --git a/src/transformers/models/convbert/modeling_convbert.py b/src/transformers/models/convbert/modeling_convbert.py index 9884d32aca7ec2..136685ad6c1ce1 100755 --- a/src/transformers/models/convbert/modeling_convbert.py +++ b/src/transformers/models/convbert/modeling_convbert.py @@ -22,7 +22,6 @@ import torch import torch.utils.checkpoint -from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @@ -36,7 +35,12 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel, SequenceSummary -from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from ...pytorch_utils import ( + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + is_torch_greater_than_1_6, + prune_linear_layer, +) from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_convbert import ConvBertConfig @@ -194,7 +198,7 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - if version.parse(torch.__version__) > version.parse("1.6.0"): + if is_torch_greater_than_1_6: self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), diff --git a/src/transformers/models/data2vec/modeling_data2vec_text.py b/src/transformers/models/data2vec/modeling_data2vec_text.py index 9c85d346174aaf..8a7d6308bf5744 100644 --- a/src/transformers/models/data2vec/modeling_data2vec_text.py +++ b/src/transformers/models/data2vec/modeling_data2vec_text.py @@ -19,7 +19,6 @@ import torch import torch.utils.checkpoint -from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @@ -35,7 +34,12 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from ...pytorch_utils import ( + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + is_torch_greater_than_1_6, + prune_linear_layer, +) from ...utils import ( add_code_sample_docstrings, add_start_docstrings, @@ -83,7 +87,7 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - if version.parse(torch.__version__) > version.parse("1.6.0"): + if is_torch_greater_than_1_6: self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), diff --git a/src/transformers/models/decision_transformer/modeling_decision_transformer.py b/src/transformers/models/decision_transformer/modeling_decision_transformer.py index 959b9763d0bd48..77804e75547770 100755 --- a/src/transformers/models/decision_transformer/modeling_decision_transformer.py +++ b/src/transformers/models/decision_transformer/modeling_decision_transformer.py @@ -21,12 +21,16 @@ import torch import torch.utils.checkpoint -from packaging import version from torch import nn from ...activations import ACT2FN from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer +from ...pytorch_utils import ( + Conv1D, + find_pruneable_heads_and_indices, + is_torch_greater_or_equal_than_1_6, + prune_conv1d_layer, +) from ...utils import ( ModelOutput, add_start_docstrings, @@ -36,7 +40,7 @@ ) -if version.parse(torch.__version__) >= version.parse("1.6"): +if is_torch_greater_or_equal_than_1_6: is_amp_available = True from torch.cuda.amp import autocast else: diff --git a/src/transformers/models/distilbert/modeling_distilbert.py b/src/transformers/models/distilbert/modeling_distilbert.py index fc5b5a7b0f7c12..1282788a57dd23 100755 --- a/src/transformers/models/distilbert/modeling_distilbert.py +++ b/src/transformers/models/distilbert/modeling_distilbert.py @@ -23,7 +23,6 @@ import numpy as np import torch -from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @@ -40,7 +39,12 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from ...pytorch_utils import ( + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + is_torch_greater_than_1_6, + prune_linear_layer, +) from ...utils import ( add_code_sample_docstrings, add_start_docstrings, @@ -102,7 +106,7 @@ def __init__(self, config: PretrainedConfig): self.LayerNorm = nn.LayerNorm(config.dim, eps=1e-12) self.dropout = nn.Dropout(config.dropout) - if version.parse(torch.__version__) > version.parse("1.6.0"): + if is_torch_greater_than_1_6: self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) diff --git a/src/transformers/models/electra/modeling_electra.py b/src/transformers/models/electra/modeling_electra.py index 3f488fbcf5648b..c215256b3e5f4e 100644 --- a/src/transformers/models/electra/modeling_electra.py +++ b/src/transformers/models/electra/modeling_electra.py @@ -21,7 +21,6 @@ import torch import torch.utils.checkpoint -from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @@ -37,7 +36,12 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel, SequenceSummary -from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from ...pytorch_utils import ( + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + is_torch_greater_than_1_6, + prune_linear_layer, +) from ...utils import ( ModelOutput, add_code_sample_docstrings, @@ -165,7 +169,7 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") - if version.parse(torch.__version__) > version.parse("1.6.0"): + if is_torch_greater_than_1_6: self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), diff --git a/src/transformers/models/flaubert/modeling_flaubert.py b/src/transformers/models/flaubert/modeling_flaubert.py index 9721880ac97657..4733c5d09b855f 100644 --- a/src/transformers/models/flaubert/modeling_flaubert.py +++ b/src/transformers/models/flaubert/modeling_flaubert.py @@ -19,10 +19,10 @@ from typing import Dict, Optional, Tuple, Union import torch -from packaging import version from torch import nn from ...modeling_outputs import BaseModelOutput +from ...pytorch_utils import is_torch_greater_than_1_6 from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from ..xlm.modeling_xlm import ( XLMForMultipleChoice, @@ -139,7 +139,7 @@ def __init__(self, config): # , dico, is_encoder, with_output): super().__init__(config) self.layerdrop = getattr(config, "layerdrop", 0.0) self.pre_norm = getattr(config, "pre_norm", False) - if version.parse(torch.__version__) > version.parse("1.6.0"): + if is_torch_greater_than_1_6: self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) diff --git a/src/transformers/models/flava/modeling_flava.py b/src/transformers/models/flava/modeling_flava.py index c0841a0e277230..9201a987609a80 100644 --- a/src/transformers/models/flava/modeling_flava.py +++ b/src/transformers/models/flava/modeling_flava.py @@ -22,7 +22,6 @@ import torch import torch.utils.checkpoint -from packaging import version from torch import nn from transformers.utils.doc import add_code_sample_docstrings @@ -30,6 +29,7 @@ from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer +from ...pytorch_utils import is_torch_greater_than_1_6 from ...utils import ( ModelOutput, add_start_docstrings, @@ -392,7 +392,7 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - if version.parse(torch.__version__) > version.parse("1.6.0"): + if is_torch_greater_than_1_6: self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), diff --git a/src/transformers/models/fnet/modeling_fnet.py b/src/transformers/models/fnet/modeling_fnet.py index 8ed67182319ff9..e2347adce961c2 100755 --- a/src/transformers/models/fnet/modeling_fnet.py +++ b/src/transformers/models/fnet/modeling_fnet.py @@ -21,7 +21,6 @@ import torch import torch.utils.checkpoint -from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @@ -44,7 +43,7 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import apply_chunking_to_forward +from ...pytorch_utils import apply_chunking_to_forward, is_torch_greater_than_1_6 from ...utils import ( add_code_sample_docstrings, add_start_docstrings, @@ -118,7 +117,7 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - if version.parse(torch.__version__) > version.parse("1.6.0"): + if is_torch_greater_than_1_6: self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), diff --git a/src/transformers/models/gpt2/modeling_gpt2.py b/src/transformers/models/gpt2/modeling_gpt2.py index 1c61adb10d9f64..4c6495d353d13e 100644 --- a/src/transformers/models/gpt2/modeling_gpt2.py +++ b/src/transformers/models/gpt2/modeling_gpt2.py @@ -22,12 +22,18 @@ import torch import torch.utils.checkpoint -from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss +from ...pytorch_utils import ( + Conv1D, + find_pruneable_heads_and_indices, + is_torch_greater_or_equal_than_1_6, + prune_conv1d_layer, +) + -if version.parse(torch.__version__) >= version.parse("1.6"): +if is_torch_greater_or_equal_than_1_6: is_amp_available = True from torch.cuda.amp import autocast else: @@ -41,7 +47,6 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel, SequenceSummary -from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer from ...utils import ( ModelOutput, add_code_sample_docstrings, diff --git a/src/transformers/models/imagegpt/modeling_imagegpt.py b/src/transformers/models/imagegpt/modeling_imagegpt.py index c80f16267c6999..e71ea4a272c2d0 100755 --- a/src/transformers/models/imagegpt/modeling_imagegpt.py +++ b/src/transformers/models/imagegpt/modeling_imagegpt.py @@ -21,12 +21,18 @@ import torch import torch.utils.checkpoint -from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss +from ...pytorch_utils import ( + Conv1D, + find_pruneable_heads_and_indices, + is_torch_greater_or_equal_than_1_6, + prune_conv1d_layer, +) + -if version.parse(torch.__version__) >= version.parse("1.6"): +if is_torch_greater_or_equal_than_1_6: is_amp_available = True from torch.cuda.amp import autocast else: @@ -39,7 +45,6 @@ SequenceClassifierOutputWithPast, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_imagegpt import ImageGPTConfig diff --git a/src/transformers/models/mctct/modeling_mctct.py b/src/transformers/models/mctct/modeling_mctct.py index 25d368b7dc75d6..3eb59a0c419beb 100755 --- a/src/transformers/models/mctct/modeling_mctct.py +++ b/src/transformers/models/mctct/modeling_mctct.py @@ -21,7 +21,6 @@ import torch import torch.utils.checkpoint -from packaging import version from torch import nn from ...activations import ACT2FN @@ -34,6 +33,7 @@ find_pruneable_heads_and_indices, prune_linear_layer, ) +from ...pytorch_utils import is_torch_greater_than_1_6 from ...utils import logging from .configuration_mctct import MCTCTConfig @@ -153,7 +153,7 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - if version.parse(torch.__version__) > version.parse("1.6.0"): + if is_torch_greater_than_1_6: self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), diff --git a/src/transformers/models/nezha/modeling_nezha.py b/src/transformers/models/nezha/modeling_nezha.py index ab37c142bc22b3..4fa38b3ed48f09 100644 --- a/src/transformers/models/nezha/modeling_nezha.py +++ b/src/transformers/models/nezha/modeling_nezha.py @@ -23,7 +23,6 @@ import torch import torch.utils.checkpoint -from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @@ -39,7 +38,12 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from ...pytorch_utils import ( + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + is_torch_greater_than_1_6, + prune_linear_layer, +) from ...utils import ( ModelOutput, add_code_sample_docstrings, @@ -183,7 +187,7 @@ def __init__(self, config): # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) - if version.parse(torch.__version__) > version.parse("1.6.0"): + if is_torch_greater_than_1_6: self.register_buffer( "token_type_ids", torch.zeros((1, config.max_position_embeddings), dtype=torch.long), diff --git a/src/transformers/models/nystromformer/modeling_nystromformer.py b/src/transformers/models/nystromformer/modeling_nystromformer.py index b5813af781b72f..e1f352d2c89798 100755 --- a/src/transformers/models/nystromformer/modeling_nystromformer.py +++ b/src/transformers/models/nystromformer/modeling_nystromformer.py @@ -20,7 +20,6 @@ import torch import torch.utils.checkpoint -from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @@ -34,7 +33,12 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from ...pytorch_utils import ( + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + is_torch_greater_than_1_6, + prune_linear_layer, +) from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_nystromformer import NystromformerConfig @@ -68,7 +72,7 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)) + 2) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") - if version.parse(torch.__version__) > version.parse("1.6.0"): + if is_torch_greater_than_1_6: self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), diff --git a/src/transformers/models/qdqbert/modeling_qdqbert.py b/src/transformers/models/qdqbert/modeling_qdqbert.py index 805da6516fd491..35890625b1ffbd 100755 --- a/src/transformers/models/qdqbert/modeling_qdqbert.py +++ b/src/transformers/models/qdqbert/modeling_qdqbert.py @@ -23,7 +23,6 @@ import torch import torch.utils.checkpoint -from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @@ -40,7 +39,7 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer +from ...pytorch_utils import find_pruneable_heads_and_indices, is_torch_greater_than_1_6, prune_linear_layer from ...utils import ( add_code_sample_docstrings, add_start_docstrings, @@ -167,7 +166,7 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - if version.parse(torch.__version__) > version.parse("1.6.0"): + if is_torch_greater_than_1_6: self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), diff --git a/src/transformers/models/realm/modeling_realm.py b/src/transformers/models/realm/modeling_realm.py index f63ea07ad96002..6ee2b1fd14b402 100644 --- a/src/transformers/models/realm/modeling_realm.py +++ b/src/transformers/models/realm/modeling_realm.py @@ -20,7 +20,6 @@ from typing import Optional, Tuple, Union import torch -from packaging import version from torch import nn from torch.nn import CrossEntropyLoss @@ -32,7 +31,12 @@ ModelOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from ...pytorch_utils import ( + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + is_torch_greater_than_1_6, + prune_linear_layer, +) from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_realm import RealmConfig @@ -181,7 +185,7 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - if version.parse(torch.__version__) > version.parse("1.6.0"): + if is_torch_greater_than_1_6: self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), diff --git a/src/transformers/models/roberta/modeling_roberta.py b/src/transformers/models/roberta/modeling_roberta.py index 0b57b1031e537b..46add0be500195 100644 --- a/src/transformers/models/roberta/modeling_roberta.py +++ b/src/transformers/models/roberta/modeling_roberta.py @@ -20,7 +20,6 @@ import torch import torch.utils.checkpoint -from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @@ -36,7 +35,12 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from ...pytorch_utils import ( + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + is_torch_greater_than_1_6, + prune_linear_layer, +) from ...utils import ( add_code_sample_docstrings, add_start_docstrings, @@ -83,7 +87,7 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - if version.parse(torch.__version__) > version.parse("1.6.0"): + if is_torch_greater_than_1_6: self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), diff --git a/src/transformers/models/vilt/modeling_vilt.py b/src/transformers/models/vilt/modeling_vilt.py index f20573d0d5141a..308358850c9808 100755 --- a/src/transformers/models/vilt/modeling_vilt.py +++ b/src/transformers/models/vilt/modeling_vilt.py @@ -21,7 +21,6 @@ import torch import torch.utils.checkpoint -from packaging import version from torch import nn from torch.nn import CrossEntropyLoss @@ -35,14 +34,19 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer +from ...pytorch_utils import ( + find_pruneable_heads_and_indices, + is_torch_greater_or_equal_than_1_10, + is_torch_greater_than_1_6, + prune_linear_layer, +) from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_vilt import ViltConfig logger = logging.get_logger(__name__) -if version.parse(torch.__version__) < version.parse("1.10.0"): +if not is_torch_greater_or_equal_than_1_10: logger.warning( f"You are using torch=={torch.__version__}, but torch>=1.10.0 is required to use " "ViltModel. Please upgrade torch." @@ -251,7 +255,7 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - if version.parse(torch.__version__) > version.parse("1.6.0"): + if is_torch_greater_than_1_6: self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), diff --git a/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py b/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py index 70dd4221573be8..aa41466767d688 100644 --- a/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py +++ b/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py @@ -19,7 +19,6 @@ import torch import torch.utils.checkpoint -from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @@ -35,7 +34,12 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from ...pytorch_utils import ( + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + is_torch_greater_than_1_6, + prune_linear_layer, +) from ...utils import ( add_code_sample_docstrings, add_start_docstrings, @@ -76,7 +80,7 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - if version.parse(torch.__version__) > version.parse("1.6.0"): + if is_torch_greater_than_1_6: self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), diff --git a/src/transformers/models/yoso/modeling_yoso.py b/src/transformers/models/yoso/modeling_yoso.py index 2977cfe64c0004..085d46bdfb5504 100644 --- a/src/transformers/models/yoso/modeling_yoso.py +++ b/src/transformers/models/yoso/modeling_yoso.py @@ -21,7 +21,6 @@ import torch import torch.utils.checkpoint -from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss @@ -35,7 +34,12 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from ...pytorch_utils import ( + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + is_torch_greater_than_1_6, + prune_linear_layer, +) from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_yoso import YosoConfig @@ -257,7 +261,7 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)) + 2) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") - if version.parse(torch.__version__) > version.parse("1.6.0"): + if is_torch_greater_than_1_6: self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), diff --git a/src/transformers/onnx/convert.py b/src/transformers/onnx/convert.py index d4deb796683df1..a896b76a1cca4e 100644 --- a/src/transformers/onnx/convert.py +++ b/src/transformers/onnx/convert.py @@ -34,6 +34,7 @@ if is_torch_available(): from ..modeling_utils import PreTrainedModel + from ..pytorch_utils import is_torch_less_than_1_11 if is_tf_available(): from ..modeling_tf_utils import TFPreTrainedModel @@ -155,7 +156,7 @@ def export_pytorch( # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility - if parse(torch.__version__) < parse("1.10"): + if is_torch_less_than_1_11: # export can work with named args but the dict containing named args # has to be the last element of the args tuple. try: diff --git a/src/transformers/pipelines/base.py b/src/transformers/pipelines/base.py index 29a12e7df22a30..a3e11eb60060df 100644 --- a/src/transformers/pipelines/base.py +++ b/src/transformers/pipelines/base.py @@ -967,7 +967,9 @@ def postprocess(self, model_outputs: ModelOutput, **postprocess_parameters: Dict def get_inference_context(self): inference_context = ( - torch.inference_mode if version.parse(torch.__version__) >= version.parse("1.9.0") else torch.no_grad + torch.inference_mode + if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.9.0") + else torch.no_grad ) return inference_context diff --git a/src/transformers/pytorch_utils.py b/src/transformers/pytorch_utils.py index c7bfba81fb9852..571a5d7d3c941b 100644 --- a/src/transformers/pytorch_utils.py +++ b/src/transformers/pytorch_utils.py @@ -25,8 +25,12 @@ logger = logging.get_logger(__name__) -is_torch_less_than_1_8 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.8.0") -is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11") +parsed_torch_version_base = version.parse(version.parse(torch.__version__).base_version) +is_torch_greater_or_equal_than_1_6 = parsed_torch_version_base >= version.parse("1.6.0") +is_torch_greater_than_1_6 = parsed_torch_version_base > version.parse("1.6.0") +is_torch_less_than_1_8 = parsed_torch_version_base < version.parse("1.8.0") +is_torch_greater_or_equal_than_1_10 = parsed_torch_version_base >= version.parse("1.10") +is_torch_less_than_1_11 = parsed_torch_version_base < version.parse("1.11") def torch_int_div(tensor1, tensor2): diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index 37a21b0939c9e2..90a30aaa9f6554 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -71,7 +71,12 @@ from .modeling_utils import PreTrainedModel, load_sharded_checkpoint, unwrap_model from .models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_MAPPING_NAMES from .optimization import Adafactor, get_scheduler -from .pytorch_utils import ALL_LAYERNORM_LAYERS +from .pytorch_utils import ( + ALL_LAYERNORM_LAYERS, + is_torch_greater_or_equal_than_1_6, + is_torch_greater_or_equal_than_1_10, + is_torch_less_than_1_11, +) from .tokenization_utils_base import PreTrainedTokenizerBase from .trainer_callback import ( CallbackHandler, @@ -165,11 +170,11 @@ if is_apex_available(): from apex import amp -if version.parse(torch.__version__) >= version.parse("1.6"): +if is_torch_greater_or_equal_than_1_6: _is_torch_generator_available = True _is_native_cuda_amp_available = True -if version.parse(torch.__version__) >= version.parse("1.10"): +if is_torch_greater_or_equal_than_1_10: _is_native_cpu_amp_available = True if is_datasets_available(): @@ -405,7 +410,7 @@ def __init__( # Would have to update setup.py with torch>=1.12.0 # which isn't ideally given that it will force people not using FSDP to also use torch>=1.12.0 # below is the current alternative. - if version.parse(torch.__version__) < version.parse("1.12.0"): + if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.12.0"): raise ValueError("FSDP requires PyTorch >= 1.12.0") from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy @@ -1676,7 +1681,7 @@ def _inner_training_loop( is_random_sampler = hasattr(train_dataloader, "sampler") and isinstance( train_dataloader.sampler, RandomSampler ) - if version.parse(torch.__version__) < version.parse("1.11") or not is_random_sampler: + if is_torch_less_than_1_11 or not is_random_sampler: # We just need to begin an iteration to create the randomization of the sampler. # That was before PyTorch 1.11 however... for _ in train_dataloader: @@ -2430,7 +2435,7 @@ def autocast_smart_context_manager(self): arguments, depending on the situation. """ if self.use_cuda_amp or self.use_cpu_amp: - if version.parse(torch.__version__) >= version.parse("1.10"): + if is_torch_greater_or_equal_than_1_10: ctx_manager = ( torch.cpu.amp.autocast(dtype=self.amp_dtype) if self.use_cpu_amp diff --git a/src/transformers/trainer_pt_utils.py b/src/transformers/trainer_pt_utils.py index c264f89d07402a..e1ad471b07a9e0 100644 --- a/src/transformers/trainer_pt_utils.py +++ b/src/transformers/trainer_pt_utils.py @@ -835,7 +835,7 @@ def _get_learning_rate(self): last_lr = ( # backward compatibility for pytorch schedulers self.lr_scheduler.get_last_lr()[0] - if version.parse(torch.__version__) >= version.parse("1.4") + if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.4") else self.lr_scheduler.get_lr()[0] ) return last_lr diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index 363d337e2b04eb..37172d14fcc289 100644 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -300,7 +300,7 @@ def is_torch_bf16_gpu_available(): # 4. torch.autocast exists # XXX: one problem here is that it may give invalid results on mixed gpus setup, so it's # really only correct for the 0th gpu (or currently set default device if different from 0) - if version.parse(torch.__version__) < version.parse("1.10"): + if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.10"): return False if torch.cuda.is_available() and torch.version.cuda is not None: @@ -322,7 +322,7 @@ def is_torch_bf16_cpu_available(): import torch - if version.parse(torch.__version__) < version.parse("1.10"): + if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.10"): return False try: @@ -357,7 +357,7 @@ def is_torch_tf32_available(): return False if int(torch.version.cuda.split(".")[0]) < 11: return False - if version.parse(torch.__version__) < version.parse("1.7"): + if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.7"): return False return True diff --git a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py index b2ffcbb6c2c94c..cbe8153c0ec70a 100755 --- a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py +++ b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py @@ -22,7 +22,6 @@ import torch import torch.utils.checkpoint -from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from typing import Optional, Tuple, Union @@ -48,6 +47,7 @@ apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, + is_torch_greater_than_1_6, ) from ...utils import logging from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Config @@ -157,7 +157,7 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") - if version.parse(torch.__version__) > version.parse("1.6.0"): + if is_torch_greater_than_1_6: self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), From b69a62d579ded31d490e0c9a01bf9ecb81cb9b65 Mon Sep 17 00:00:00 2001 From: Thomas Wang <24695242+thomasw21@users.noreply.github.com> Date: Thu, 4 Aug 2022 11:08:03 +0200 Subject: [PATCH 008/539] [BLOOM] Clean modeling code (#18344) * Cleanup some code * Improve signatures * Try to reduce the number of reshape/copies * I don't think we actually need the layer_num scaling trick * No need for duplication * Try to fix beam_search * Fix beam search * Removing layer num normalization seems to be breaking * Not sure self.layer_number normalization actually matters * Try and be backward compatible * Try to fix beam_search * Revert attempt to be backward compatible * Improve documentation on past_key_values format * Optimize the device allocation in case of hidden_states in multiple devices * No need to manually cast the values to a specific device * Rename with long version of variables * Improve type hinting * Add comment that explains that some methods return views * Actually i think the attention casting only makes sense when we use torch.float16 * We don't actually need layer_number to be passed anymore * Fix FX test * Bypass torch.baddbmm * Apply suggestions from code review * Add comment about support for torchScript v1.11 * fix ONNX support for bloom (#18456) Co-authored-by: Niklas Muennighoff Co-authored-by: Nouamane Tazi --- .../models/bloom/configuration_bloom.py | 15 +- .../models/bloom/modeling_bloom.py | 471 ++++++++++-------- 2 files changed, 275 insertions(+), 211 deletions(-) diff --git a/src/transformers/models/bloom/configuration_bloom.py b/src/transformers/models/bloom/configuration_bloom.py index 23ecc6d9267114..a33a6339b14e99 100644 --- a/src/transformers/models/bloom/configuration_bloom.py +++ b/src/transformers/models/bloom/configuration_bloom.py @@ -214,14 +214,19 @@ def generate_dummy_inputs( batch, seqlen = common_inputs["input_ids"].shape # Not using the same length for past_key_values past_key_values_length = seqlen + 2 - past_shape = ( - batch, + head_dim = self._config.hidden_size // self.num_attention_heads + past_key_shape = ( + batch * self.num_attention_heads, + head_dim, past_key_values_length, - self.num_attention_heads, - self._config.hidden_size // self.num_attention_heads, + ) + past_value_shape = ( + batch * self.num_attention_heads, + past_key_values_length, + head_dim, ) ordered_inputs["past_key_values"] = [ - (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers) + (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers) ] ordered_inputs["attention_mask"] = common_inputs["attention_mask"] diff --git a/src/transformers/models/bloom/modeling_bloom.py b/src/transformers/models/bloom/modeling_bloom.py index afa289afe5b798..a33054a3835113 100644 --- a/src/transformers/models/bloom/modeling_bloom.py +++ b/src/transformers/models/bloom/modeling_bloom.py @@ -16,12 +16,13 @@ import math import warnings -from typing import Tuple, Union +from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss +from torch.nn import functional as F from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( @@ -52,102 +53,100 @@ ] -def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0): +def _make_causal_mask( + input_ids_shape: torch.Size, device: torch.device, past_key_values_length: int +) -> torch.BoolTensor: """ - Make causal mask used for bi-directional self-attention. + Make causal mask used for self-attention. """ batch_size, target_length = input_ids_shape - mask = torch.full((target_length, target_length), torch.finfo(dtype).min) - mask_cond = torch.arange(mask.size(-1)) - intermediate_mask = mask_cond < (mask_cond + 1).view(mask.size(-1), 1) - mask.masked_fill_(intermediate_mask, 0) - mask = mask.to(dtype) + mask = torch.empty((target_length, target_length + past_key_values_length), dtype=torch.bool, device=device) + # ONNX doesn't support `torch.Tensor.triu` properly, thus we use this workaround + seq_ids = torch.arange(target_length, device=device) + mask[:, past_key_values_length:] = seq_ids[:, None] < seq_ids[None, :] if past_key_values_length > 0: - mask = torch.cat([torch.zeros(target_length, past_key_values_length, dtype=dtype), mask], dim=-1) + mask[:, :past_key_values_length] = False + expanded_mask = mask[None, None, :, :].expand(batch_size, 1, target_length, target_length + past_key_values_length) return expanded_mask -def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: int = None): +def _expand_mask(mask: torch.Tensor, tgt_length: int) -> torch.BoolTensor: """ - Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. + Expands attention_mask from `[batch_size, src_length]` to `[batch_size, 1, tgt_length, src_length]`. """ - batch_size, source_length = mask.size() - tgt_len = tgt_len if tgt_len is not None else source_length - - expanded_mask = mask[:, None, None, :].expand(batch_size, 1, tgt_len, source_length).to(dtype) - - inverted_mask = 1.0 - expanded_mask + batch_size, src_length = mask.shape + tgt_length = tgt_length if tgt_length is not None else src_length - return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + expanded_mask = ~(mask[:, None, None, :].to(torch.bool)) + return expanded_mask.expand(batch_size, 1, tgt_length, src_length) -def build_alibi_tensor(attention_mask: torch.Tensor, n_head: int, dtype, device) -> torch.Tensor: +def build_alibi_tensor(attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor: """ Link to paper: https://arxiv.org/abs/2108.12409 Alibi tensor is not causal as the original paper mentions, it relies on a translation invariance of softmax for quick implementation: with l being a tensor, and a fixed value `softmax(l+a) = softmax(l)`. Based on https://github.com/ofirpress/attention_with_linear_biases/blob/a35aaca144e0eb6b789dfcb46784c4b8e31b7983/fairseq/models/transformer.py#L742 + TODO @thomasw21 this doesn't work as nicely due to the masking strategy, and so masking varies slightly. Args: - Returns tensor shaped (batch_size * n_head, 1, max_seq_len) + Returns tensor shaped (batch_size * num_heads, 1, max_seq_len) attention_mask (`torch.Tensor`): Token-wise attention mask, this should be of shape (batch_size, max_seq_len). - n_head (`int`, *required*): + num_heads (`int`, *required*): number of heads dtype (`torch.dtype`, *optional*, default=`torch.bfloat16`): dtype of the output tensor - device (`torch.device`, *optional*, default=`torch.device('cpu')`): - device of the output alibi tensor """ - closest_power_of_2 = 2 ** math.floor(math.log2(n_head)) - base = torch.tensor(2 ** (-(2 ** -(math.log2(closest_power_of_2) - 3))), device=device, dtype=torch.float32) - powers = torch.arange(1, 1 + closest_power_of_2, device=device, dtype=torch.int32) + batch_size, seq_length = attention_mask.shape + closest_power_of_2 = 2 ** math.floor(math.log2(num_heads)) + base = torch.tensor( + 2 ** (-(2 ** -(math.log2(closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32 + ) + powers = torch.arange(1, 1 + closest_power_of_2, device=attention_mask.device, dtype=torch.int32) slopes = torch.pow(base, powers) - if closest_power_of_2 != n_head: + if closest_power_of_2 != num_heads: extra_base = torch.tensor( - 2 ** (-(2 ** -(math.log2(2 * closest_power_of_2) - 3))), device=device, dtype=torch.float32 + 2 ** (-(2 ** -(math.log2(2 * closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32 ) - num_remaining_heads = min(closest_power_of_2, n_head - closest_power_of_2) - extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, device=device, dtype=torch.int32) + num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2) + extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, device=attention_mask.device, dtype=torch.int32) slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0) # Note: alibi will added to the attention bias that will be applied to the query, key product of attention # => therefore alibi will have to be of shape (batch_size, num_heads, query_length, key_length) - # => here we set (batch_size=1, num_heads=n_head, query_length=1, key_length=max_length) + # => here we set (batch_size=1, num_heads=num_heads, query_length=1, key_length=max_length) # => the query_length dimension will then be broadcasted correctly # This is more or less identical to T5's relative position bias: # https://github.com/huggingface/transformers/blob/f681437203baa7671de3174b0fa583c349d9d5e1/src/transformers/models/t5/modeling_t5.py#L527 - # batch_size = 1, n_head = n_head, query_length - - arange_tensor = (attention_mask.cumsum(-1)[:, None, :].to(device) - 1) * attention_mask[:, None] - alibi = slopes.unsqueeze(-1) * arange_tensor - alibi = alibi * attention_mask[:, None] - return alibi.reshape(alibi.shape[0] * n_head, 1, -1).to(dtype) + arange_tensor = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :] + alibi = slopes[..., None] * arange_tensor + return alibi.reshape(batch_size * num_heads, 1, seq_length).to(dtype) -def dropout_add(x, residual, prob, training): +def dropout_add(x: torch.Tensor, residual: torch.Tensor, prob: float, training: bool) -> torch.Tensor: """ Dropout add function Args: x (`torch.tensor`, *required*): input tensor - residual (`torch.tensor`, *rquired*): + residual (`torch.tensor`, *required*): esidual tensor prob (`float`, *required*): dropout probability training (`bool`, *required*): training mode """ - out = nn.functional.dropout(x, p=prob, training=training) + out = F.dropout(x, p=prob, training=training) out = residual + out return out -def bloom_gelu_forward(x): +def bloom_gelu_forward(x: torch.Tensor) -> torch.Tensor: """ Custom bias GELU function. Adapted from Megatron-DeepSpeed code. Here we use a simple implementation (inference) to make the model jitable. @@ -159,7 +158,7 @@ def bloom_gelu_forward(x): return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))) -def bloom_gelu_back(g, x): +def bloom_gelu_back(g: torch.Tensor, x: torch.Tensor) -> torch.Tensor: """ gradient of tanh approximation of gelu gradient of actual gelu is: 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x) @@ -179,12 +178,12 @@ def bloom_gelu_back(g, x): class GeLUFunction(torch.autograd.Function): @staticmethod - def forward(ctx, input): + def forward(ctx, input: torch.Tensor) -> torch.Tensor: ctx.save_for_backward(input) return bloom_gelu_forward(input) @staticmethod - def backward(ctx, grad_output): + def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor: input = ctx.saved_tensors tmp = bloom_gelu_back(grad_output, input) return tmp @@ -197,13 +196,12 @@ class BloomGelu(nn.Module): copied from Megatron-DeepSpeed code and adapted for our needs See here why autograd functions are not torchscriptable: https://github.com/pytorch/pytorch/issues/22329 - """ def __init__(self): super().__init__() - def forward(self, x): + def forward(self, x: torch.Tensor) -> torch.Tensor: if self.training: return GeLUFunction.apply(x) else: @@ -211,7 +209,7 @@ def forward(self, x): class BloomAttention(nn.Module): - def __init__(self, config, layer_number=None): + def __init__(self, config: BloomConfig): super().__init__() self.pretraining_tp = config.pretraining_tp @@ -230,106 +228,131 @@ def __init__(self, config, layer_number=None): ) # Layer-wise attention scaling - self.layer_number = max(1, layer_number) - self.norm_factor = math.sqrt(self.head_dim) * self.layer_number + self.inv_norm_factor = 1.0 / math.sqrt(self.head_dim) + self.beta = 1.0 self.query_key_value = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=True) self.dense = nn.Linear(self.hidden_size, self.hidden_size) self.attention_dropout = nn.Dropout(config.attention_dropout) - def _split_heads(self, fused_qkv): + def _split_heads(self, fused_qkv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ - Split the last dimension into (num_heads, head_dim) + Split the last dimension into (num_heads, head_dim) without making any copies, results share same memory + storage as `fused_qkv` + + Args: + fused_qkv (`torch.tensor`, *required*): [batch_size, seq_length, num_heads * 3 * head_dim] + + Returns: + query: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim] + value: [batch_size, seq_length, num_heads, head_dim] + """ + batch_size, seq_length, three_times_hidden_size = fused_qkv.shape + fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim) + return fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :] + + def _merge_heads(self, x: torch.Tensor) -> torch.Tensor: + """ + Merge heads together over the last dimenstion + + Args: + x: (`torch.tensor`, *required*): [batch_size * num_heads, seq_length, head_dim] + + Returns: + torch.tensor: [batch_size, seq_length, num_heads * head_dim] """ - new_tensor_shape = fused_qkv.size()[:-1] + (self.num_heads, 3 * self.head_dim) - # new_tensor_shape = (fused_qkv.size(1), fused_qkv.size(0)*fused_qkv.size(2), fused_qkv.size(-1)) - # fused_qkv = fused_qkv.transpose(1, 0) - fused_qkv = fused_qkv.reshape(new_tensor_shape) - # fused_qkv = fused_qkv.permute(0, 2, 1, 3) - return torch.split(fused_qkv, self.head_dim, -1) - - def _merge_heads(self, x): # What we want to achieve is: - # batch_size * num_heads, seq_len, head_dim -> batch_size, seq_len, num_heads * head_dim + # batch_size * num_heads, seq_length, head_dim -> batch_size, seq_length, num_heads * head_dim + batch_size_and_num_heads, seq_length, _ = x.shape + batch_size = batch_size_and_num_heads // self.num_heads # First view to decompose the batch size - # batch_size*num_heads, seq_len, head_dim -> batch_size, num_heads, seq_len, head_dim - x = x.view(x.size(0) // self.num_heads, self.num_heads, x.size(1), self.head_dim) + # batch_size * num_heads, seq_length, head_dim -> batch_size, num_heads, seq_length, head_dim + x = x.view(batch_size, self.num_heads, seq_length, self.head_dim) - # batch_size, num_heads, seq_len, head_dim -> batch_size, seq_len, num_heads, head_dim + # batch_size, num_heads, seq_length, head_dim -> batch_size, seq_length, num_heads, head_dim x = x.permute(0, 2, 1, 3) - # batch_size, seq_len, num_heads, head_dim -> batch_size, seq_len, num_heads * head_dim - return x.reshape(x.size(0), x.size(1), self.num_heads * self.head_dim) + # batch_size, seq_length, num_heads, head_dim -> batch_size, seq_length, num_heads * head_dim + return x.reshape(batch_size, seq_length, self.num_heads * self.head_dim) def forward( self, - hidden_states, - residual, - layer_past=None, - attention_mask=None, - alibi=None, - head_mask=None, - use_cache=False, - output_attentions=False, + hidden_states: torch.Tensor, + residual: torch.Tensor, + alibi: torch.Tensor, + attention_mask: torch.Tensor, + layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + head_mask: Optional[torch.Tensor] = None, + use_cache: bool = False, + output_attentions: bool = False, ): - alibi = alibi.to(hidden_states.device) # to make the model possible to run under accelerate fused_qkv = self.query_key_value(hidden_states) # [batch_size, seq_length, 3 x hidden_size] # 3 x [batch_size, seq_length, num_heads, head_dim] (query_layer, key_layer, value_layer) = self._split_heads(fused_qkv) + batch_size, q_length, _, _ = query_layer.shape + + query_layer = query_layer.transpose(1, 2).reshape(batch_size * self.num_heads, q_length, self.head_dim) + key_layer = key_layer.permute(0, 2, 3, 1).reshape(batch_size * self.num_heads, self.head_dim, q_length) + value_layer = value_layer.transpose(1, 2).reshape(batch_size * self.num_heads, q_length, self.head_dim) if layer_past is not None: past_key, past_value = layer_past - # concatenate along seq_length dimension -> [batch_size, qk_length, num_heads, head_dim] - key_layer = torch.cat((past_key.type_as(key_layer), key_layer), dim=1) - value_layer = torch.cat((past_value.type_as(value_layer), value_layer), dim=1) + # concatenate along seq_length dimension: + # - key: [batch_size * self.num_heads, head_dim, kv_length] + # - value: [batch_size * self.num_heads, kv_length, head_dim] + key_layer = torch.cat((past_key, key_layer), dim=2) + value_layer = torch.cat((past_value, value_layer), dim=1) + + _, _, kv_length = key_layer.shape if use_cache is True: present = (key_layer, value_layer) else: present = None - beta = 1.0 / self.layer_number - - # # [batch_size*num_heads, head_dim, q_length] x [batch_size*num_heads, head_dim, k_length] -> [batch_size*num_heads, q_length, k_length] - matmul_result = (1.0 / self.norm_factor) * torch.bmm( - query_layer.transpose(1, 2).reshape(-1, query_layer.shape[1], query_layer.shape[3]), - key_layer.permute(0, 2, 3, 1).reshape(-1, key_layer.shape[3], key_layer.shape[1]), - ) + beta * alibi + # [batch_size * num_heads, q_length, kv_length] + # we use `torch.Tensor.baddbmm` instead of `torch.baddbmm` as the latter isn't supported by TorchScript v1.11 + matmul_result = alibi.baddbmm( + batch1=query_layer, + batch2=key_layer, + beta=self.beta, + alpha=self.inv_norm_factor, + ) - # change view to [batch_size, num_heads, q_length, k_length] - attention_scores = matmul_result.view(-1, self.num_heads, matmul_result.size(1), matmul_result.size(2)) + # change view to [batch_size, num_heads, q_length, kv_length] + attention_scores = matmul_result.view(batch_size, self.num_heads, q_length, kv_length) - # We replace the scaled softmax by just a few line of code - [batch_size, num_heads, q_length, k_length] + # cast attention scores to fp32, compute scaled softmax and cast back to initial dtype - [batch_size, num_heads, q_length, kv_length] input_dtype = attention_scores.dtype - attn_weights = (attention_scores * self.layer_number) + attention_mask - attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min)) - attention_probs = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(input_dtype) - attention_probs = attention_probs * (~attention_mask.to(torch.bool)) - # [batch_size, num_heads, q_length, k_length] + # `float16` has a minimum value of -65504.0, whereas `bfloat16` and `float32` have a minimum value of `-3.4e+38` + if input_dtype == torch.float16: + attention_scores = attention_scores.to(torch.float) + attn_weights = torch.masked_fill(attention_scores, attention_mask, torch.finfo(attention_scores.dtype).min) + attention_probs = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(input_dtype) + + # [batch_size, num_heads, q_length, kv_length] attention_probs = self.attention_dropout(attention_probs) if head_mask is not None: attention_probs = attention_probs * head_mask - # change view [batch_size x num_heads, q_length, k_length] - attention_probs_reshaped = attention_probs.view(matmul_result.shape) + # change view [batch_size x num_heads, q_length, kv_length] + attention_probs_reshaped = attention_probs.view(batch_size * self.num_heads, q_length, kv_length) # matmul: [batch_size * num_heads, q_length, head_dim] - context_layer = torch.bmm( - attention_probs_reshaped, value_layer.transpose(1, 2).reshape(-1, value_layer.size(1), value_layer.size(3)) - ) + context_layer = torch.bmm(attention_probs_reshaped, value_layer) # change view [batch_size, num_heads, q_length, head_dim] context_layer = self._merge_heads(context_layer) # aggregate results across tp ranks. See here: https://github.com/pytorch/pytorch/issues/76232 if self.pretraining_tp > 1 and self.slow_but_exact: - slices = context_layer.shape[-1] / self.pretraining_tp + slices = self.hidden_size / self.pretraining_tp output_tensor = torch.zeros_like(context_layer) for i in range(self.pretraining_tp): - output_tensor = output_tensor + nn.functional.linear( + output_tensor = output_tensor + F.linear( context_layer[:, :, int(i * slices) : int((i + 1) * slices)], self.dense.weight[:, int(i * slices) : int((i + 1) * slices)], ) @@ -346,7 +369,7 @@ def forward( class BloomMLP(nn.Module): - def __init__(self, config): + def __init__(self, config: BloomConfig): super().__init__() hidden_size = config.hidden_size @@ -357,14 +380,14 @@ def __init__(self, config): self.dense_4h_to_h = nn.Linear(4 * hidden_size, hidden_size) self.hidden_dropout = config.hidden_dropout - def forward(self, hidden_states, residual): + def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor) -> torch.Tensor: hidden_states = self.gelu_impl(self.dense_h_to_4h(hidden_states)) if self.pretraining_tp > 1 and self.slow_but_exact: intermediate_output = torch.zeros_like(residual) slices = self.dense_4h_to_h.weight.shape[-1] / self.pretraining_tp for i in range(self.pretraining_tp): - intermediate_output = intermediate_output + nn.functional.linear( + intermediate_output = intermediate_output + F.linear( hidden_states[:, :, int(i * slices) : int((i + 1) * slices)], self.dense_4h_to_h.weight[:, int(i * slices) : int((i + 1) * slices)], ) @@ -377,13 +400,13 @@ def forward(self, hidden_states, residual): class BloomBlock(nn.Module): - def __init__(self, config, layer_number=None): + def __init__(self, config: BloomConfig): super().__init__() hidden_size = config.hidden_size self.input_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) - self.n_head = config.n_head - self.self_attention = BloomAttention(config, layer_number=layer_number) + self.num_heads = config.n_head + self.self_attention = BloomAttention(config) self.post_attention_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon) self.mlp = BloomMLP(config) @@ -393,13 +416,13 @@ def __init__(self, config, layer_number=None): def forward( self, - hidden_states, - layer_past=None, - attention_mask=None, - head_mask=None, - use_cache=False, - output_attentions=False, - alibi=None, + hidden_states: torch.Tensor, + alibi: torch.Tensor, + attention_mask: torch.Tensor, + layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + head_mask: Optional[torch.Tensor] = None, + use_cache: bool = False, + output_attentions: bool = False, ): # hidden_states: [batch_size, seq_length, hidden_size] @@ -462,9 +485,9 @@ class BloomPreTrainedModel(PreTrainedModel): def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) - def _init_weights(self, module): + def _init_weights(self, module: nn.Module): """Initialize the weights.""" - if isinstance(module, (nn.Linear)): + if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) @@ -478,7 +501,7 @@ def _init_weights(self, module): module.bias.data.zero_() module.weight.data.fill_(1.0) - def _set_gradient_checkpointing(self, module, value=False): + def _set_gradient_checkpointing(self, module: nn.Module, value: bool = False): if isinstance(module, BloomModel): module.gradient_checkpointing = value @@ -501,9 +524,8 @@ def _set_gradient_checkpointing(self, module, value=False): BLOOM_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): - `input_ids_length` = `sequence_length` if `past_key_values` is `None` else - `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input - sequence tokens in the vocabulary. + `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0][0].shape[2]` + (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as `input_ids`. @@ -516,6 +538,10 @@ def _set_gradient_checkpointing(self, module, value=False): Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have their past given to this model should not be passed as `input_ids` as they have already been computed. + + Each element of `past_key_values` is a tuple (past_key, past_value): + - past_key: [batch_size * num_heads, head_dim, kv_length] + - past_value: [batch_size * num_heads, kv_length, head_dim] attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: @@ -555,19 +581,18 @@ def _set_gradient_checkpointing(self, module, value=False): BLOOM_START_DOCSTRING, ) class BloomModel(BloomPreTrainedModel): - def __init__(self, config): + def __init__(self, config: BloomConfig): super().__init__(config) self.embed_dim = config.hidden_size - self.n_head = config.n_head + self.num_heads = config.n_head # Embedding + LN Embedding self.word_embeddings = nn.Embedding(config.vocab_size, self.embed_dim) - self.word_embeddings_layernorm = LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) # Transformer blocks - self.h = nn.ModuleList([BloomBlock(config, layer_number=i) for i in range(config.num_hidden_layers)]) + self.h = nn.ModuleList([BloomBlock(config) for _ in range(config.num_hidden_layers)]) # Final Layer Norm self.ln_f = LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) @@ -580,25 +605,29 @@ def __init__(self, config): def get_input_embeddings(self): return self.word_embeddings - def _prepare_attn_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): + def _prepare_attn_mask( + self, attention_mask: torch.Tensor, input_shape: Tuple[int, int], past_key_values_length: int + ) -> torch.BoolTensor: # create causal mask - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + # [batch_size, seq_length] -> [batch_size, 1, tgt_length, src_length] combined_attention_mask = None - if input_shape[-1] > 1: + device = attention_mask.device + _, src_length = input_shape + + if src_length > 1: combined_attention_mask = _make_causal_mask( - input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length - ).to(attention_mask.device) - - if attention_mask is not None: - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) - combined_attention_mask = ( - expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask + input_shape, device=device, past_key_values_length=past_key_values_length ) + # [batch_size, seq_length] -> [batch_size, 1, tgt_length, src_length] + expanded_attn_mask = _expand_mask(attention_mask, tgt_length=src_length) + combined_attention_mask = ( + expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask | combined_attention_mask + ) + return combined_attention_mask - def set_input_embeddings(self, new_embeddings): + def set_input_embeddings(self, new_embeddings: torch.Tensor): self.word_embeddings = new_embeddings @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING) @@ -610,17 +639,17 @@ def set_input_embeddings(self, new_embeddings): ) def forward( self, - input_ids=None, - past_key_values=None, - attention_mask=None, - head_mask=None, - inputs_embeds=None, - use_cache=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, + input_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, + attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, **deprecated_arguments - ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: + ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]: if deprecated_arguments.pop("position_ids", False) is not False: # `position_ids` could have been `torch.Tensor` or `None` so defaulting pop to `False` allows to detect if users were passing explicitly `None` warnings.warn( @@ -641,10 +670,9 @@ def forward( if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: - input_shape = input_ids.size() - input_ids = input_ids.view(-1, input_shape[-1]) + batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: - input_shape = inputs_embeds.size()[:-1] + batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either input_ids or inputs_embeds") @@ -653,8 +681,8 @@ def forward( # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head - # attention_probs has shape bsz x n_head x N x N - # head_mask has shape n_layer x batch x n_head x N x N + # attention_probs has shape batch_size x num_heads x N x N + # head_mask has shape n_layer x batch x num_heads x N x N head_mask = self.get_head_mask(head_mask, self.config.n_layer) if inputs_embeds is None: @@ -662,27 +690,28 @@ def forward( hidden_states = self.word_embeddings_layernorm(inputs_embeds) - output_shape = input_shape + (hidden_states.size(-1),) - presents = () if use_cache else None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None # Compute alibi tensor: check build_alibi_tensor documentation - current_sequence_length = hidden_states.shape[1] + seq_length_with_past = seq_length past_key_values_length = 0 if past_key_values[0] is not None: - past_key_values_length = past_key_values[0][0].shape[1] - current_sequence_length += past_key_values_length - + past_key_values_length = past_key_values[0][0].shape[2] + seq_length_with_past = seq_length_with_past + past_key_values_length if attention_mask is None: - attention_mask = torch.ones((hidden_states.shape[0], current_sequence_length), device=hidden_states.device) + attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device) else: attention_mask = attention_mask.to(hidden_states.device) - alibi = build_alibi_tensor(attention_mask, self.n_head, hidden_states.dtype, hidden_states.device) + alibi = build_alibi_tensor(attention_mask, self.num_heads, dtype=hidden_states.dtype) - causal_mask = self._prepare_attn_mask(attention_mask, input_shape, inputs_embeds, past_key_values_length) + causal_mask = self._prepare_attn_mask( + attention_mask, + input_shape=(batch_size, seq_length), + past_key_values_length=past_key_values_length, + ) for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): @@ -700,14 +729,14 @@ def forward( def create_custom_forward(module): def custom_forward(*inputs): # None for past_key_value - return module(*inputs, use_cache, output_attentions, alibi) + return module(*inputs, use_cache=use_cache, output_attentions=output_attentions) return custom_forward outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(block), hidden_states, - None, + alibi, causal_mask, head_mask[i], ) @@ -735,8 +764,6 @@ def custom_forward(*inputs): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) - hidden_states = hidden_states.view(output_shape) - if not return_dict: return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None) @@ -758,7 +785,7 @@ def custom_forward(*inputs): class BloomForCausalLM(BloomPreTrainedModel): _keys_to_ignore_on_load_missing = [r"h.*.self_attention.scale_mask_softmax.causal_mask", r"lm_head.weight"] - def __init__(self, config): + def __init__(self, config: BloomConfig): super().__init__(config) self.transformer = BloomModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) @@ -769,16 +796,20 @@ def __init__(self, config): def get_output_embeddings(self): return self.lm_head - def set_output_embeddings(self, new_embeddings): + def set_output_embeddings(self, new_embeddings: torch.Tensor): self.lm_head = new_embeddings - def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs): - # only last token for inputs_ids if past is defined in kwargs + def prepare_inputs_for_generation( + self, + input_ids: torch.LongTensor, + past: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + **kwargs + ) -> dict: + # only last token for input_ids if past is not None if past: input_ids = input_ids[:, -1].unsqueeze(-1) - attention_mask = kwargs.get("attention_mask", None) - return { "input_ids": input_ids, "past_key_values": past, @@ -795,16 +826,16 @@ def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs): ) def forward( self, - input_ids=None, - past_key_values=None, - attention_mask=None, - head_mask=None, - inputs_embeds=None, - labels=None, - use_cache=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, + input_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, + attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, **deprecated_arguments ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: r""" @@ -845,9 +876,12 @@ def forward( # Shift so that tokens < n predict n shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() + batch_size, seq_length, vocab_size = shift_logits.shape # Flatten the tokens loss_fct = CrossEntropyLoss() - loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) + loss = loss_fct( + shift_logits.view(batch_size * seq_length, vocab_size), shift_labels.view(batch_size * seq_length) + ) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] @@ -862,14 +896,36 @@ def forward( ) @staticmethod - def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]: + def _reorder_cache( + past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor + ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]: """ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct beam_idx at every generation step. + + Output shares the same memory storage as `past`. """ + batch_size_times_num_heads, head_dim, seq_length = past[0][0].shape + batch_size = len(beam_idx) + num_heads = batch_size_times_num_heads // batch_size + # Get a copy of `beam_idx` on all the devices where we need those indices. + device_to_beam_idx = { + past_state.device: beam_idx.to(past_state.device) for layer_past in past for past_state in layer_past + } + # key: layer_past[0] [batch_size * num_heads, head_dim, seq_length] + # value: layer_past[1] [batch_size * num_heads, seq_length, head_dim] return tuple( - tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past) + ( + layer_past[0] + .view(batch_size, num_heads, head_dim, seq_length) + .index_select(0, device_to_beam_idx[layer_past[0].device]) + .view(batch_size_times_num_heads, head_dim, seq_length), + layer_past[1] + .view(batch_size, num_heads, seq_length, head_dim) + .index_select(0, device_to_beam_idx[layer_past[0].device]) + .view(batch_size_times_num_heads, seq_length, head_dim), + ) for layer_past in past ) @@ -892,7 +948,7 @@ def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> class BloomForSequenceClassification(BloomPreTrainedModel): _keys_to_ignore_on_load_missing = [r"h.*.self_attention.scale_mask_softmax.causal_mask", r"lm_head.weight"] - def __init__(self, config): + def __init__(self, config: BloomConfig): super().__init__(config) self.num_labels = config.num_labels self.transformer = BloomModel(config) @@ -910,16 +966,16 @@ def __init__(self, config): ) def forward( self, - input_ids=None, - past_key_values=None, - attention_mask=None, - head_mask=None, - inputs_embeds=None, - labels=None, - use_cache=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, + input_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, + attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, **deprecated_arguments ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]: r""" @@ -966,7 +1022,7 @@ def forward( sequence_lengths = -1 else: if input_ids is not None: - sequence_lengths = torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1 + sequence_lengths = torch.ne(input_ids, self.config.pad_token_id).sum(dim=-1) - 1 else: sequence_lengths = -1 logger.warning( @@ -994,7 +1050,7 @@ def forward( loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() - loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) + loss = loss_fct(pooled_logits, labels) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(pooled_logits, labels) @@ -1021,7 +1077,7 @@ def forward( class BloomForTokenClassification(BloomPreTrainedModel): _keys_to_ignore_on_load_missing = [r"h.*.self_attention.scale_mask_softmax.causal_mask", r"lm_head.weight"] - def __init__(self, config): + def __init__(self, config: BloomConfig): super().__init__(config) self.num_labels = config.num_labels @@ -1047,16 +1103,16 @@ def __init__(self, config): ) def forward( self, - input_ids=None, - past_key_values=None, - attention_mask=None, - head_mask=None, - inputs_embeds=None, - labels=None, - use_cache=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, + input_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, + attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, **deprecated_arguments ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: r""" @@ -1095,8 +1151,11 @@ def forward( loss = None if labels is not None: + batch_size, seq_length = labels.shape loss_fct = CrossEntropyLoss() - loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + loss = loss_fct( + logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length) + ) if not return_dict: output = (logits,) + transformer_outputs[2:] From fc1d841b2db7d110fe5983b3a00c80508edf79eb Mon Sep 17 00:00:00 2001 From: nlpcat <83209776+nlpcat@users.noreply.github.com> Date: Thu, 4 Aug 2022 03:26:11 -0700 Subject: [PATCH 009/539] change shape to support dynamic batch input in tf.function XLA generate for tf serving (#18372) * change shape to support dynamic batch input in tf.generate * add tests Co-authored-by: nlpcatcode --- src/transformers/generation_tf_utils.py | 25 ++++++++------- tests/test_modeling_tf_common.py | 41 +++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 13 deletions(-) diff --git a/src/transformers/generation_tf_utils.py b/src/transformers/generation_tf_utils.py index ec9a61e90099fd..a3d26b789c646e 100644 --- a/src/transformers/generation_tf_utils.py +++ b/src/transformers/generation_tf_utils.py @@ -1533,7 +1533,7 @@ def _generate( # 2. Define model inputs input_ids = self._prepare_model_inputs(input_ids, bos_token_id) # inputs_ids now has to be defined and cannot be None anymore - batch_size = input_ids.shape[0] + batch_size = shape_list(input_ids)[0] # 3. Prepare other model kwargs if output_attentions is not None: @@ -1702,7 +1702,8 @@ def _generate( @staticmethod def _expand_to_num_beams(tensor: tf.Tensor, num_beams: int) -> tf.Tensor: - return tf.broadcast_to(tensor[:, None], (tensor.shape[0], num_beams) + tensor.shape[1:]) + shape = shape_list(tensor) + return tf.broadcast_to(tensor[:, None], (shape[0], num_beams) + tuple(shape[1:])) def _prepare_attention_mask_for_generation( self, @@ -2162,7 +2163,7 @@ def greedy_search( decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None # 3. init tensors to use for "xla-compileable" generate function - batch_size, cur_len = input_ids.shape + batch_size, cur_len = shape_list(input_ids) # initialize `generated` (`input_ids` padded with `pad_token_id`), `finished_sequences` input_ids_padding = tf.ones((batch_size, max_length - cur_len), dtype=tf.int32) * (pad_token_id or 0) @@ -2432,7 +2433,7 @@ def sample( decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None # 3. init tensors to use for "xla-compileable" generate function - batch_size, cur_len = input_ids.shape + batch_size, cur_len = shape_list(input_ids) # initialize `generated` (pre-populated with `pad_token_id`), `finished_sequences` input_ids_padding = tf.ones((batch_size, max_length - cur_len), dtype=tf.int32) * (pad_token_id or 0) @@ -2678,18 +2679,16 @@ def beam_search( def flatten_beam_dim(tensor, batch_axis=0): """Flattens the first two dimensions of a non-scalar array.""" + shape = shape_list(tensor) return tf.reshape( tensor, - tensor.shape[:batch_axis] - + [tensor.shape[batch_axis] * tensor.shape[batch_axis + 1]] - + tensor.shape[batch_axis + 2 :], + shape[:batch_axis] + [shape[batch_axis] * shape[batch_axis + 1]] + shape[batch_axis + 2 :], ) def unflatten_beam_dim(tensor, batch_size, num_beams, batch_axis=0): """Unflattens the first, flat batch*beam dimension of a non-scalar array.""" - return tf.reshape( - tensor, tensor.shape[:batch_axis] + [batch_size, num_beams] + tensor.shape[batch_axis + 1 :] - ) + shape = shape_list(tensor) + return tf.reshape(tensor, shape[:batch_axis] + [batch_size, num_beams] + shape[batch_axis + 1 :]) def gather_beams(nested, beam_indices, batch_axis=0): """Gathers the beam slices indexed by beam_indices into new beam array.""" @@ -2748,7 +2747,7 @@ def gather_fn(tensor): decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None # 3. init tensors to use for "xla-compileable" generate function - batch_size, num_beams, cur_len = input_ids.shape + batch_size, num_beams, cur_len = shape_list(input_ids) # per batch, beam-item holding current token in loop, pre-populated with `pad_token_id` input_ids_padding = tf.ones((batch_size, num_beams, max_length - cur_len), dtype=tf.int32) * ( @@ -2894,7 +2893,7 @@ def beam_search_body_fn( eos_in_next_token = tf.broadcast_to(eos_in_next_token, topk_sequences[:, :, cur_len].shape) did_topk_just_finished = eos_in_next_token & tf.broadcast_to( tf.concat((tf.ones((num_beams), dtype=tf.bool), tf.zeros((num_beams), dtype=tf.bool)), axis=0), - eos_in_next_token.shape, + shape_list(eos_in_next_token), ) # non-top `num_beams` eos tokens can't be used to finish a beam, but the others can't be used in the next @@ -2917,7 +2916,7 @@ def beam_search_body_fn( topk_log_probs = topk_log_probs / (tf.cast(cur_len, dtype=tf.float32) ** length_penalty) beams_in_batch_are_full = ( tf.broadcast_to( - tf.math.reduce_all(is_sent_finished, axis=-1, keepdims=True), did_topk_just_finished.shape + tf.math.reduce_all(is_sent_finished, axis=-1, keepdims=True), shape_list(did_topk_just_finished) ) & early_stopping ) diff --git a/tests/test_modeling_tf_common.py b/tests/test_modeling_tf_common.py index 31338e56f972e4..d63b1b32733e89 100644 --- a/tests/test_modeling_tf_common.py +++ b/tests/test_modeling_tf_common.py @@ -73,6 +73,7 @@ TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, BertConfig, TFAutoModel, + TFAutoModelForSeq2SeqLM, TFAutoModelForSequenceClassification, TFBertModel, TFSharedEmbeddings, @@ -2163,6 +2164,46 @@ def test_checkpoint_sharding_local(self): for p1, p2 in zip(model.weights, new_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) + def test_generate_tf_function_export(self): + test_model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5") + max_length = 8 + + class DummyModel(tf.Module): + def __init__(self, model): + super(DummyModel, self).__init__() + self.model = model + + @tf.function( + input_signature=( + tf.TensorSpec((None, max_length), tf.int32, name="input_ids"), + tf.TensorSpec((None, max_length), tf.int32, name="attention_mask"), + ), + jit_compile=True, + ) + def serving(self, input_ids, attention_mask): + outputs = self.model.generate( + input_ids=input_ids, + attention_mask=attention_mask, + max_new_tokens=max_length, + return_dict_in_generate=True, + ) + return {"sequences": outputs["sequences"]} + + dummy_input_ids = [[2, 3, 4, 1, 0, 0, 0, 0], [102, 103, 104, 105, 1, 0, 0, 0]] + dummy_attention_masks = [[1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0]] + dummy_model = DummyModel(model=test_model) + with tempfile.TemporaryDirectory() as tmp_dir: + tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving}) + serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"] + for batch_size in range(1, len(dummy_input_ids) + 1): + inputs = { + "input_ids": tf.constant(dummy_input_ids[:batch_size]), + "attention_mask": tf.constant(dummy_attention_masks[:batch_size]), + } + tf_func_outputs = serving_func(**inputs)["sequences"] + tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_length) + tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs) + @require_tf @is_staging_test From c74befc9e328472241c175351da8a3af5b058978 Mon Sep 17 00:00:00 2001 From: Michael Benayoun Date: Thu, 4 Aug 2022 13:29:18 +0200 Subject: [PATCH 010/539] HFTracer.trace can now take callables and torch.nn.Module (#18457) * Enable HFTracer to trace with custom dummy inputs instead of pre-computed ones * Add HFTracer.trace docstring, and make it possible to handle callable and torch.nn.Module in general * Remove pdb comment * Apply suggestions --- src/transformers/utils/fx.py | 63 +++++++++++++++++++++++++++++++++--- 1 file changed, 58 insertions(+), 5 deletions(-) diff --git a/src/transformers/utils/fx.py b/src/transformers/utils/fx.py index 7eae67ba70c320..b135e7f642cd4b 100644 --- a/src/transformers/utils/fx.py +++ b/src/transformers/utils/fx.py @@ -882,11 +882,51 @@ def call_module(self, m, forward, args, kwargs): def proxy(self, node): return HFProxy(node, self) - def trace(self, root: PreTrainedModel, concrete_args: Optional[Dict[str, Any]] = None) -> Graph: + def trace( + self, + root: Union[torch.nn.Module, Callable[..., Any]], + concrete_args: Optional[Dict[str, Any]] = None, + dummy_inputs: Optional[Dict[str, Any]] = None, + complete_concrete_args_with_inputs_not_in_dummy_inputs: bool = True, + ) -> Graph: + """ + Traces `root` and returns the corresponding FX `torch.fx.Graph` representation. `root` can either be a + `torch.nn.Module` instance or a Python callable. Note that after this call, `self.root` may be different from + the `root` passed in here. For example, when a free function is passed to `trace()`, we will create a + `torch.nn.Module` instance to use as the root and add embedded constants to. + + Args: + root (`torch.nn.Module` or `Callable`): + Either a `torch.nn.Module`` or a function to be traced through. If root is not a + [`~transformers.PreTrainedModel`], then `dummy_inputs` must be passed, otherwise tracing will fail. + concrete_args (`Dict[str, Any], *optional*): + Concrete arguments that should not be treated as Proxies + dummy_inputs (`Dict[str, Any]`, *optional*): + The dummy inputs needed to handle data-dependent control-flow if `root` is not a + [`~transformers.PreTrainedModel`]. It can also be used when `root` is a + [`~transformers.PreTrainedModel`] to specify custom dummy inputs for a subset or all the model inputs. + complete_concrete_args_with_inputs_not_in_dummy_inputs (`bool`, *optional*, defaults to `True`): + If `True`, and `dummy_inputs` is specified, every argument that `root` can take that is not in + `dummy_inputs` and not in `concrete_args` will be added to `concrete_args`, otherwise does nothing. + + Returns: + `torch.fx.Graph`: + A FX `torch.fx.Graph` representing the semantics of the passed-in `root`. + + """ + sig = inspect.signature(root.forward if isinstance(root, torch.nn.Module) else root) + if concrete_args is None: concrete_args = {} - sig = inspect.signature(root.forward) + if dummy_inputs is not None and complete_concrete_args_with_inputs_not_in_dummy_inputs: + for param in sig.parameters.values(): + if param.name in dummy_inputs: + continue + if param.default is inspect.Parameter.empty: + raise ValueError(f"You need to specify a default value for the parameter {param.name}.") + concrete_args.update({p.name: p.default for p in sig.parameters.values() if p.name not in dummy_inputs}) + input_names = sig.parameters.keys() - concrete_args.keys() # Creating a random input shape to generate dummy inputs. @@ -898,11 +938,24 @@ def trace(self, root: PreTrainedModel, concrete_args: Optional[Dict[str, Any]] = num_choices = _generate_random_int(low=2, high=5) shape.insert(1, num_choices) - inputs = {} + inputs = dict(dummy_inputs) if dummy_inputs is not None else {} for input_name in input_names: - inputs.update(self._generate_dummy_input(root, input_name, shape)) + if input_name in inputs: + continue + # We enforce that root must either be a PreTrainedModel or deserialized from a serialized traced model to + # be able to use HFTracer._generate_dummy_input. + if isinstance(root, PreTrainedModel) or type(root).__qualname__.startswith("_deserialize_graph_module"): + inputs.update(self._generate_dummy_input(root, input_name, shape)) + else: + raise RuntimeError( + f"Could not generate input named {input_name} for because root is not a" + " transformers.PreTrainedModel." + ) - concrete_metas = {input_name: input_.to("meta") for input_name, input_ in inputs.items()} + concrete_metas = { + input_name: input_.to("meta") if isinstance(input_, torch.Tensor) else input_ + for input_name, input_ in inputs.items() + } for param in sig.parameters.values(): if param.kind == inspect.Parameter.VAR_KEYWORD and param.name not in input_names: concrete_metas[f"**{param.name}"] = {} From 330247ede2d8265aae9ab0b7a0d1a811c344960d Mon Sep 17 00:00:00 2001 From: Kian Sierra McGettigan <47116198+kiansierra@users.noreply.github.com> Date: Thu, 4 Aug 2022 13:29:32 +0200 Subject: [PATCH 011/539] Update no trainer scripts for multiple-choice (#18468) * swag_no_trainer updated for with gather_metrics * Removed unused variable samples_seen --- .../pytorch/multiple-choice/run_swag_no_trainer.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/examples/pytorch/multiple-choice/run_swag_no_trainer.py b/examples/pytorch/multiple-choice/run_swag_no_trainer.py index 7d5d7588c69465..a3868434b28504 100755 --- a/examples/pytorch/multiple-choice/run_swag_no_trainer.py +++ b/examples/pytorch/multiple-choice/run_swag_no_trainer.py @@ -592,19 +592,11 @@ def preprocess_function(examples): break model.eval() - samples_seen = 0 for step, batch in enumerate(eval_dataloader): with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) - predictions, references = accelerator.gather((predictions, batch["labels"])) - # If we are in a multiprocess environment, the last batch has duplicates - if accelerator.num_processes > 1: - if step == len(eval_dataloader) - 1: - predictions = predictions[: len(eval_dataloader.dataset) - samples_seen] - references = references[: len(eval_dataloader.dataset) - samples_seen] - else: - samples_seen += references.shape[0] + predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=predictions, references=references, From df28de0581aaf6d8742c4988137caac2b6602ca8 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Thu, 4 Aug 2022 08:22:25 -0400 Subject: [PATCH 012/539] Fix load of model checkpoints in the Trainer (#18470) --- src/transformers/trainer.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index 90a30aaa9f6554..e537b3b6357adb 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -1935,7 +1935,7 @@ def _load_from_checkpoint(self, resume_from_checkpoint, model=None): else: # We load the model state dict on the CPU to avoid an OOM error. state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location="cpu") - load_result = model.load_state_dict(state_dict) + load_result = model.load_state_dict(state_dict, strict=False) # release memory del state_dict self._issue_warnings_after_load(load_result) @@ -1989,7 +1989,7 @@ def _load_best_model(self): # We load the model state dict on the CPU to avoid an OOM error. state_dict = torch.load(best_model_path, map_location="cpu") # If the model is on the GPU, it still works! - load_result = model.load_state_dict(state_dict) + load_result = model.load_state_dict(state_dict, strict=False) if not is_sagemaker_mp_enabled(): self._issue_warnings_after_load(load_result) elif os.path.exists(os.path.join(self.state.best_model_checkpoint, WEIGHTS_INDEX_NAME)): From 672b66262aa4e65863f8aa94743fdd3c2a27a10b Mon Sep 17 00:00:00 2001 From: Thomas Wang <24695242+thomasw21@users.noreply.github.com> Date: Thu, 4 Aug 2022 16:02:16 +0200 Subject: [PATCH 013/539] Add FX support for torch.baddbmm andd torch.Tensor.baddbmm (#18363) --- src/transformers/utils/fx.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/src/transformers/utils/fx.py b/src/transformers/utils/fx.py index b135e7f642cd4b..2198928eadb325 100644 --- a/src/transformers/utils/fx.py +++ b/src/transformers/utils/fx.py @@ -305,12 +305,22 @@ def torch_matmul(input, other, *, out=None): def torch_bmm(input, mat2, *, out=None): if out is not None: - raise ValueError("Don't support in-place abs for MetaTensor analysis") + raise ValueError("Don't support in-place bmm for MetaTensor analysis") batch_size, n, m = input.shape _, _, p = mat2.shape return torch.empty(batch_size, n, p, device="meta") +def torch_baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None): + if out is not None: + raise ValueError("Don't support in-place baddbmm for MetaTensor analysis") + return torch_bmm(batch1, batch2) + + +def torch_tensor_baddbmm(self, batch1, batch2, *, beta=1, alpha=1, out=None): + return torch_baddbmm(self, batch1, batch2, beta=beta, alpha=alpha, out=out) + + def torch_einsum(equation, *operands): # TODO: infer shape without performing the computation, this might be quite hard. concrete_operands = (torch.empty_like(operand, device="cpu") for operand in operands) @@ -495,6 +505,8 @@ def to_concrete(t): torch.Tensor.mul: torch_tensor_mul, torch.matmul: torch_matmul, torch.bmm: torch_bmm, + torch.baddbmm: torch_baddbmm, + torch.Tensor.baddbmm: torch_tensor_baddbmm, torch.einsum: torch_einsum, torch.Tensor.repeat: torch_tensor_repeat, torch.roll: torch_roll, From f9a0008d2d3082a665f711b24f5314e4a8205fab Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Thu, 4 Aug 2022 18:02:55 +0200 Subject: [PATCH 014/539] Add VideoMAE (#17821) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * First draft * Add VideoMAEForVideoClassification * Improve conversion script * Add VideoMAEForPreTraining * Add VideoMAEFeatureExtractor * Improve VideoMAEFeatureExtractor * Improve docs * Add first draft of model tests * Improve VideoMAEForPreTraining * Fix base_model_prefix * Make model take pixel_values of shape (B, T, C, H, W) * Add loss computation of VideoMAEForPreTraining * Improve tests * Improve model testsé * Make all tests pass * Add VideoMAE to main README * Add tests for VideoMAEFeatureExtractor * Add integration test * Improve conversion script * Rename patch embedding class * Remove VideoMAELayer from init * Update design of patch embeddings * Improve comments * Improve conversion script * Improve conversion script * Add conversion of pretrained model * Add loss verification of pretrained model * Add loss verification of unnormalized targets * Add integration test for pretraining model * Apply suggestions from code review * Fix bug to make feature extractor resize only shorter edge * Address more comments * Improve normalization of videos * Add doc examples * Move constants to dedicated script * Remove scripts * Transfer checkpoints, fix docs * Update script * Update image mean and std * Fix doc tests * Set return_tensors to NumPy by default * Revert the previous change Co-authored-by: Niels Rogge --- README.md | 1 + README_ko.md | 1 + README_zh-hans.md | 1 + README_zh-hant.md | 1 + docs/source/en/_toctree.yml | 2 + docs/source/en/index.mdx | 2 + docs/source/en/model_doc/auto.mdx | 4 + docs/source/en/model_doc/videomae.mdx | 60 + src/transformers/__init__.py | 24 + src/transformers/image_utils.py | 11 +- src/transformers/models/__init__.py | 1 + src/transformers/models/auto/__init__.py | 4 + .../models/auto/configuration_auto.py | 3 + .../models/auto/feature_extraction_auto.py | 1 + src/transformers/models/auto/modeling_auto.py | 18 + src/transformers/models/videomae/__init__.py | 77 ++ .../models/videomae/configuration_videomae.py | 148 +++ .../videomae/convert_videomae_to_pytorch.py | 286 +++++ .../videomae/feature_extraction_videomae.py | 169 +++ .../models/videomae/modeling_videomae.py | 1039 +++++++++++++++++ src/transformers/utils/__init__.py | 1 + src/transformers/utils/constants.py | 4 + src/transformers/utils/dummy_pt_objects.py | 41 + .../utils/dummy_vision_objects.py | 7 + tests/models/videomae/__init__.py | 0 .../test_feature_extraction_videomae.py | 202 ++++ .../models/videomae/test_modeling_videomae.py | 421 +++++++ tests/test_feature_extraction_common.py | 98 +- tests/test_modeling_common.py | 2 + 29 files changed, 2596 insertions(+), 33 deletions(-) create mode 100644 docs/source/en/model_doc/videomae.mdx create mode 100644 src/transformers/models/videomae/__init__.py create mode 100644 src/transformers/models/videomae/configuration_videomae.py create mode 100644 src/transformers/models/videomae/convert_videomae_to_pytorch.py create mode 100644 src/transformers/models/videomae/feature_extraction_videomae.py create mode 100644 src/transformers/models/videomae/modeling_videomae.py create mode 100644 src/transformers/utils/constants.py create mode 100644 tests/models/videomae/__init__.py create mode 100644 tests/models/videomae/test_feature_extraction_videomae.py create mode 100644 tests/models/videomae/test_modeling_videomae.py diff --git a/README.md b/README.md index 99c90a3916ddda..0cda209bdfc32c 100644 --- a/README.md +++ b/README.md @@ -368,6 +368,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang. 1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu. 1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/abs/2202.09741) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu. +1. **[VideoMAE](https://huggingface.co/docs/transformers/main/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang. 1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim. 1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby. 1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang. diff --git a/README_ko.md b/README_ko.md index adfaefddf628ca..c63fdca749da8f 100644 --- a/README_ko.md +++ b/README_ko.md @@ -324,6 +324,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang. 1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu. 1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu. +1. **[VideoMAE](https://huggingface.co/docs/transformers/main/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang. 1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim. 1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby. 1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang. diff --git a/README_zh-hans.md b/README_zh-hans.md index 0e51441b407b44..0ab06bd96ad99f 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -348,6 +348,7 @@ conda install -c huggingface transformers 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (来自 Microsoft Research) 伴随论文 [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) 由 Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang 发布。 1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (来自 Microsoft Research) 伴随论文 [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) 由 Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu 发布。 1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (来自 Tsinghua University and Nankai University) 伴随论文 [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) 由 Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu 发布。 +1. **[VideoMAE](https://huggingface.co/docs/transformers/main/model_doc/videomae)** (来自 Multimedia Computing Group, Nanjing University) 伴随论文 [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) 由 Zhan Tong, Yibing Song, Jue Wang, Limin Wang 发布。 1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (来自 NAVER AI Lab/Kakao Enterprise/Kakao Brain) 伴随论文 [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) 由 Wonjae Kim, Bokyung Son, Ildoo Kim 发布。 1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (来自 Google AI) 伴随论文 [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) 由 Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby 发布。 1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (来自 UCLA NLP) 伴随论文 [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) 由 Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index 1fbff9fa1741bd..90f29ad031b8b0 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -360,6 +360,7 @@ conda install -c huggingface transformers 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang. 1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu. 1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu. +1. **[VideoMAE](https://huggingface.co/docs/transformers/main/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang. 1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim. 1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby. 1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang. diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 2be8f650738079..32ab4c6361d3a7 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -381,6 +381,8 @@ title: Swin Transformer V2 - local: model_doc/van title: VAN + - local: model_doc/videomae + title: VideoMAE - local: model_doc/vit title: Vision Transformer (ViT) - local: model_doc/vit_mae diff --git a/docs/source/en/index.mdx b/docs/source/en/index.mdx index e8c3ed2928a7c6..5c0d51d8b7afb2 100644 --- a/docs/source/en/index.mdx +++ b/docs/source/en/index.mdx @@ -166,6 +166,7 @@ The library currently contains JAX, PyTorch and TensorFlow implementations, pret 1. **[UniSpeech](model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang. 1. **[UniSpeechSat](model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu. 1. **[VAN](model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/abs/2202.09741) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu. +1. **[VideoMAE](model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang. 1. **[ViLT](model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim. 1. **[Vision Transformer (ViT)](model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby. 1. **[VisualBERT](model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang. @@ -299,6 +300,7 @@ Flax), PyTorch, and/or TensorFlow. | UniSpeech | ❌ | ❌ | ✅ | ❌ | ❌ | | UniSpeechSat | ❌ | ❌ | ✅ | ❌ | ❌ | | VAN | ❌ | ❌ | ✅ | ❌ | ❌ | +| VideoMAE | ❌ | ❌ | ✅ | ❌ | ❌ | | ViLT | ❌ | ❌ | ✅ | ❌ | ❌ | | Vision Encoder decoder | ❌ | ❌ | ✅ | ✅ | ✅ | | VisionTextDualEncoder | ❌ | ❌ | ✅ | ❌ | ✅ | diff --git a/docs/source/en/model_doc/auto.mdx b/docs/source/en/model_doc/auto.mdx index 6c32166389614e..67fc81d280a79b 100644 --- a/docs/source/en/model_doc/auto.mdx +++ b/docs/source/en/model_doc/auto.mdx @@ -118,6 +118,10 @@ Likewise, if your `NewModel` is a subclass of [`PreTrainedModel`], make sure its [[autodoc]] AutoModelForImageClassification +## AutoModelForVideoClassification + +[[autodoc]] AutoModelForVideoClassification + ## AutoModelForVision2Seq [[autodoc]] AutoModelForVision2Seq diff --git a/docs/source/en/model_doc/videomae.mdx b/docs/source/en/model_doc/videomae.mdx new file mode 100644 index 00000000000000..c319944dc8ed43 --- /dev/null +++ b/docs/source/en/model_doc/videomae.mdx @@ -0,0 +1,60 @@ + + +# VideoMAE + +## Overview + +The VideoMAE model was proposed in [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang. +VideoMAE extends masked auto encoders ([MAE](vit_mae)) to video, claiming state-of-the-art performance on several video classification benchmarks. + +The abstract from the paper is the following: + +*Pre-training video transformers on extra large-scale datasets is generally required to achieve premier performance on relatively small datasets. In this paper, we show that video masked autoencoders (VideoMAE) are data-efficient learners for self-supervised video pre-training (SSVP). We are inspired by the recent ImageMAE and propose customized video tube masking and reconstruction. These simple designs turn out to be effective for overcoming information leakage caused by the temporal correlation during video reconstruction. We obtain three important findings on SSVP: (1) An extremely high proportion of masking ratio (i.e., 90% to 95%) still yields favorable performance of VideoMAE. The temporally redundant video content enables higher masking ratio than that of images. (2) VideoMAE achieves impressive results on very small datasets (i.e., around 3k-4k videos) without using any extra data. This is partially ascribed to the challenging task of video reconstruction to enforce high-level structure learning. (3) VideoMAE shows that data quality is more important than data quantity for SSVP. Domain shift between pre-training and target datasets are important issues in SSVP. Notably, our VideoMAE with the vanilla ViT backbone can achieve 83.9% on Kinects-400, 75.3% on Something-Something V2, 90.8% on UCF101, and 61.1% on HMDB51 without using any extra data.* + +Tips: + +- One can use [`VideoMAEFeatureExtractor`] to prepare videos for the model. It will resize + normalize all frames of a video for you. +- [`VideoMAEForPreTraining`] includes the decoder on top for self-supervised pre-training. + + + + VideoMAE pre-training. Taken from the original paper. + +This model was contributed by [nielsr](https://huggingface.co/nielsr). +The original code can be found [here](https://github.com/MCG-NJU/VideoMAE). + + +## VideoMAEConfig + +[[autodoc]] VideoMAEConfig + +## VideoMAEFeatureExtractor + +[[autodoc]] VideoMAEFeatureExtractor + - __call__ + +## VideoMAEModel + +[[autodoc]] VideoMAEModel + - forward + +## VideoMAEForPreTraining + +[[autodoc]] transformers.VideoMAEForPreTraining + - forward + +## VideoMAEForVideoClassification + +[[autodoc]] transformers.VideoMAEForVideoClassification + - forward \ No newline at end of file diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 75784ce4637659..e8cfd47f3d3b37 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -341,6 +341,7 @@ "UniSpeechSatConfig", ], "models.van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"], + "models.videomae": ["VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "VideoMAEConfig"], "models.vilt": ["VILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViltConfig", "ViltFeatureExtractor", "ViltProcessor"], "models.vision_encoder_decoder": ["VisionEncoderDecoderConfig"], "models.vision_text_dual_encoder": ["VisionTextDualEncoderConfig", "VisionTextDualEncoderProcessor"], @@ -653,6 +654,7 @@ _import_structure["models.perceiver"].append("PerceiverFeatureExtractor") _import_structure["models.poolformer"].append("PoolFormerFeatureExtractor") _import_structure["models.segformer"].append("SegformerFeatureExtractor") + _import_structure["models.videomae"].append("VideoMAEFeatureExtractor") _import_structure["models.vilt"].append("ViltFeatureExtractor") _import_structure["models.vilt"].append("ViltProcessor") _import_structure["models.vit"].append("ViTFeatureExtractor") @@ -799,6 +801,7 @@ "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING", "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING", + "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING", "MODEL_FOR_VISION_2_SEQ_MAPPING", "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING", "MODEL_MAPPING", @@ -825,6 +828,7 @@ "AutoModelForSpeechSeq2Seq", "AutoModelForTableQuestionAnswering", "AutoModelForTokenClassification", + "AutoModelForVideoClassification", "AutoModelForVision2Seq", "AutoModelForVisualQuestionAnswering", "AutoModelWithLMHead", @@ -1871,6 +1875,15 @@ "ViTMAEPreTrainedModel", ] ) + _import_structure["models.videomae"].extend( + [ + "VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST", + "VideoMAEForPreTraining", + "VideoMAEModel", + "VideoMAEPreTrainedModel", + "VideoMAEForVideoClassification", + ] + ) _import_structure["models.wav2vec2"].extend( [ "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", @@ -3100,6 +3113,7 @@ from .models.unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig from .models.unispeech_sat import UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechSatConfig from .models.van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig + from .models.videomae import VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP, VideoMAEConfig from .models.vilt import VILT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViltConfig, ViltFeatureExtractor, ViltProcessor from .models.vision_encoder_decoder import VisionEncoderDecoderConfig from .models.vision_text_dual_encoder import VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor @@ -3373,6 +3387,7 @@ from .models.perceiver import PerceiverFeatureExtractor from .models.poolformer import PoolFormerFeatureExtractor from .models.segformer import SegformerFeatureExtractor + from .models.videomae import VideoMAEFeatureExtractor from .models.vilt import ViltFeatureExtractor, ViltProcessor from .models.vit import ViTFeatureExtractor from .models.yolos import YolosFeatureExtractor @@ -3497,6 +3512,7 @@ MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, + MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, MODEL_FOR_VISION_2_SEQ_MAPPING, MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, MODEL_MAPPING, @@ -3523,6 +3539,7 @@ AutoModelForSpeechSeq2Seq, AutoModelForTableQuestionAnswering, AutoModelForTokenClassification, + AutoModelForVideoClassification, AutoModelForVision2Seq, AutoModelForVisualQuestionAnswering, AutoModelWithLMHead, @@ -4338,6 +4355,13 @@ VanModel, VanPreTrainedModel, ) + from .models.videomae import ( + VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST, + VideoMAEForPreTraining, + VideoMAEForVideoClassification, + VideoMAEModel, + VideoMAEPreTrainedModel, + ) from .models.vilt import ( VILT_PRETRAINED_MODEL_ARCHIVE_LIST, ViltForImageAndTextRetrieval, diff --git a/src/transformers/image_utils.py b/src/transformers/image_utils.py index ddef7a3a777e93..dd7bb326993d34 100644 --- a/src/transformers/image_utils.py +++ b/src/transformers/image_utils.py @@ -23,14 +23,15 @@ import requests from .utils import is_torch_available +from .utils.constants import ( # noqa: F401 + IMAGENET_DEFAULT_MEAN, + IMAGENET_DEFAULT_STD, + IMAGENET_STANDARD_MEAN, + IMAGENET_STANDARD_STD, +) from .utils.generic import _is_torch -IMAGENET_DEFAULT_MEAN = [0.485, 0.456, 0.406] -IMAGENET_DEFAULT_STD = [0.229, 0.224, 0.225] -IMAGENET_STANDARD_MEAN = [0.5, 0.5, 0.5] -IMAGENET_STANDARD_STD = [0.5, 0.5, 0.5] - ImageInput = Union[ PIL.Image.Image, np.ndarray, "torch.Tensor", List[PIL.Image.Image], List[np.ndarray], List["torch.Tensor"] # noqa ] diff --git a/src/transformers/models/__init__.py b/src/transformers/models/__init__.py index 1b81ce7d8fab1d..11887db91f8393 100644 --- a/src/transformers/models/__init__.py +++ b/src/transformers/models/__init__.py @@ -137,6 +137,7 @@ unispeech, unispeech_sat, van, + videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, diff --git a/src/transformers/models/auto/__init__.py b/src/transformers/models/auto/__init__.py index 09961bae14fdeb..b04c2420ef963e 100644 --- a/src/transformers/models/auto/__init__.py +++ b/src/transformers/models/auto/__init__.py @@ -63,6 +63,7 @@ "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING", "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING", + "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING", "MODEL_FOR_VISION_2_SEQ_MAPPING", "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING", "MODEL_MAPPING", @@ -89,6 +90,7 @@ "AutoModelForSpeechSeq2Seq", "AutoModelForTableQuestionAnswering", "AutoModelForTokenClassification", + "AutoModelForVideoClassification", "AutoModelForVision2Seq", "AutoModelForVisualQuestionAnswering", "AutoModelWithLMHead", @@ -203,6 +205,7 @@ MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, + MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, MODEL_FOR_VISION_2_SEQ_MAPPING, MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, MODEL_MAPPING, @@ -229,6 +232,7 @@ AutoModelForSpeechSeq2Seq, AutoModelForTableQuestionAnswering, AutoModelForTokenClassification, + AutoModelForVideoClassification, AutoModelForVision2Seq, AutoModelForVisualQuestionAnswering, AutoModelWithLMHead, diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index 13f69c024a9a0d..d8ecbb49e64f29 100644 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -132,6 +132,7 @@ ("unispeech", "UniSpeechConfig"), ("unispeech-sat", "UniSpeechSatConfig"), ("van", "VanConfig"), + ("videomae", "VideoMAEConfig"), ("vilt", "ViltConfig"), ("vision-encoder-decoder", "VisionEncoderDecoderConfig"), ("vision-text-dual-encoder", "VisionTextDualEncoderConfig"), @@ -247,6 +248,7 @@ ("unispeech", "UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("unispeech-sat", "UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("van", "VAN_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("videomae", "VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("vilt", "VILT_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("visual_bert", "VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("vit", "VIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), @@ -388,6 +390,7 @@ ("unispeech", "UniSpeech"), ("unispeech-sat", "UniSpeechSat"), ("van", "VAN"), + ("videomae", "VideoMAE"), ("vilt", "ViLT"), ("vision-encoder-decoder", "Vision Encoder decoder"), ("vision-text-dual-encoder", "VisionTextDualEncoder"), diff --git a/src/transformers/models/auto/feature_extraction_auto.py b/src/transformers/models/auto/feature_extraction_auto.py index 8c4564e261c616..ed526369df4f38 100644 --- a/src/transformers/models/auto/feature_extraction_auto.py +++ b/src/transformers/models/auto/feature_extraction_auto.py @@ -68,6 +68,7 @@ ("swin", "ViTFeatureExtractor"), ("swinv2", "ViTFeatureExtractor"), ("van", "ConvNextFeatureExtractor"), + ("videomae", "ViTFeatureExtractor"), ("vilt", "ViltFeatureExtractor"), ("vit", "ViTFeatureExtractor"), ("vit_mae", "ViTFeatureExtractor"), diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index a86e8bc56da34b..bd4774c245b07b 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -127,6 +127,7 @@ ("unispeech", "UniSpeechModel"), ("unispeech-sat", "UniSpeechSatModel"), ("van", "VanModel"), + ("videomae", "VideoMAEModel"), ("vilt", "ViltModel"), ("vision-text-dual-encoder", "VisionTextDualEncoderModel"), ("visual_bert", "VisualBertModel"), @@ -187,6 +188,7 @@ ("transfo-xl", "TransfoXLLMHeadModel"), ("unispeech", "UniSpeechForPreTraining"), ("unispeech-sat", "UniSpeechSatForPreTraining"), + ("videomae", "VideoMAEForPreTraining"), ("visual_bert", "VisualBertForPreTraining"), ("vit_mae", "ViTMAEForPreTraining"), ("wav2vec2", "Wav2Vec2ForPreTraining"), @@ -381,6 +383,12 @@ ] ) +MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + ("videomae", "VideoMAEForVideoClassification"), + ] +) + MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict( [ ("vision-encoder-decoder", "VisionEncoderDecoderModel"), @@ -754,6 +762,9 @@ MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING = _LazyAutoMapping( CONFIG_MAPPING_NAMES, MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES ) +MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES +) MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( CONFIG_MAPPING_NAMES, MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES @@ -938,6 +949,13 @@ class AutoModelForObjectDetection(_BaseAutoModelClass): AutoModelForObjectDetection = auto_class_update(AutoModelForObjectDetection, head_doc="object detection") +class AutoModelForVideoClassification(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING + + +AutoModelForVideoClassification = auto_class_update(AutoModelForVideoClassification, head_doc="video classification") + + class AutoModelForVision2Seq(_BaseAutoModelClass): _model_mapping = MODEL_FOR_VISION_2_SEQ_MAPPING diff --git a/src/transformers/models/videomae/__init__.py b/src/transformers/models/videomae/__init__.py new file mode 100644 index 00000000000000..fb239c6063ba80 --- /dev/null +++ b/src/transformers/models/videomae/__init__.py @@ -0,0 +1,77 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available + + +_import_structure = { + "configuration_videomae": ["VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "VideoMAEConfig"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_videomae"] = [ + "VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST", + "VideoMAEForPreTraining", + "VideoMAEModel", + "VideoMAEPreTrainedModel", + "VideoMAEForVideoClassification", + ] + +try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["feature_extraction_videomae"] = ["VideoMAEFeatureExtractor"] + +if TYPE_CHECKING: + from .configuration_videomae import VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP, VideoMAEConfig + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_videomae import ( + VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST, + VideoMAEForPreTraining, + VideoMAEForVideoClassification, + VideoMAEModel, + VideoMAEPreTrainedModel, + ) + + try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .feature_extraction_videomae import VideoMAEFeatureExtractor + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/src/transformers/models/videomae/configuration_videomae.py b/src/transformers/models/videomae/configuration_videomae.py new file mode 100644 index 00000000000000..932c4c1d98cabf --- /dev/null +++ b/src/transformers/models/videomae/configuration_videomae.py @@ -0,0 +1,148 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" VideoMAE model configuration""" + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + +VIDEOMAE_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "MCG-NJU/videomae-base": "https://huggingface.co/MCG-NJU/videomae-base/resolve/main/config.json", +} + + +class VideoMAEConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`VideoMAEModel`]. It is used to instantiate a + VideoMAE model according to the specified arguments, defining the model architecture. Instantiating a configuration + with the defaults will yield a similar configuration to that of the VideoMAE + [MCG-NJU/videomae-base](https://huggingface.co/MCG-NJU/videomae-base) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + image_size (`int`, *optional*, defaults to 224): + The size (resolution) of each image. + patch_size (`int`, *optional*, defaults to 16): + The size (resolution) of each patch. + num_channels (`int`, *optional*, defaults to 3): + The number of input channels. + num_frames (`int`, *optional*, defaults to 16): + The number of frames in each video. + tubelet_size (`int`, *optional*, defaults to 2): + The number of tubelets. + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the layer normalization layers. + qkv_bias (`bool`, *optional*, defaults to `True`): + Whether to add a bias to the queries, keys and values. + use_mean_pooling (`bool`, *optional*, defaults to `True`): + Whether to mean pool the final hidden states instead of using the final hidden state of the [CLS] token. + decoder_num_attention_heads (`int`, *optional*, defaults to 6): + Number of attention heads for each attention layer in the decoder. + decoder_hidden_size (`int`, *optional*, defaults to 384): + Dimensionality of the decoder. + decoder_num_hidden_layers (`int`, *optional*, defaults to 4): + Number of hidden layers in the decoder. + decoder_intermediate_size (`int`, *optional*, defaults to 1536): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the decoder. + norm_pix_loss (`bool`, *optional*, defaults to `True`): + Whether to normalize the target patch pixels. + + Example: + + ```python + >>> from transformers import VideoMAEConfig, VideoMAEModel + + >>> # Initializing a VideoMAE videomae-base style configuration + >>> configuration = VideoMAEConfig() + + >>> # Randomly initializing a model from the configuration + >>> model = VideoMAEModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + model_type = "videomae" + + def __init__( + self, + image_size=224, + patch_size=16, + num_channels=3, + num_frames=16, + tubelet_size=2, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.0, + attention_probs_dropout_prob=0.0, + initializer_range=0.02, + layer_norm_eps=1e-12, + qkv_bias=True, + use_mean_pooling=True, + decoder_num_attention_heads=6, + decoder_hidden_size=384, + decoder_num_hidden_layers=4, + decoder_intermediate_size=1536, + norm_pix_loss=True, + **kwargs + ): + super().__init__(**kwargs) + + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.num_frames = num_frames + self.tubelet_size = tubelet_size + + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.qkv_bias = qkv_bias + self.use_mean_pooling = use_mean_pooling + + self.decoder_num_attention_heads = decoder_num_attention_heads + self.decoder_hidden_size = decoder_hidden_size + self.decoder_num_hidden_layers = decoder_num_hidden_layers + self.decoder_intermediate_size = decoder_intermediate_size + self.norm_pix_loss = norm_pix_loss diff --git a/src/transformers/models/videomae/convert_videomae_to_pytorch.py b/src/transformers/models/videomae/convert_videomae_to_pytorch.py new file mode 100644 index 00000000000000..60e5ae8f5f41c0 --- /dev/null +++ b/src/transformers/models/videomae/convert_videomae_to_pytorch.py @@ -0,0 +1,286 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Convert VideoMAE checkpoints from the original repository: https://github.com/MCG-NJU/VideoMAE""" + +import argparse +import json + +import numpy as np +import torch + +import gdown +from huggingface_hub import hf_hub_download +from transformers import ( + VideoMAEConfig, + VideoMAEFeatureExtractor, + VideoMAEForPreTraining, + VideoMAEForVideoClassification, +) + + +def get_videomae_config(model_name): + config = VideoMAEConfig() + + if "large" in model_name: + config.hidden_size = 1024 + config.intermediate_size = 4096 + config.num_hidden_layers = 24 + config.num_attention_heads = 16 + config.decoder_num_hidden_layers = 12 + config.decoder_num_attention_heads = 8 + config.decoder_hidden_size = 512 + config.decoder_intermediate_size = 2048 + + if "finetuned" not in model_name: + config.use_mean_pooling = False + + if "finetuned" in model_name: + repo_id = "datasets/huggingface/label-files" + if "kinetics" in model_name: + config.num_labels = 400 + filename = "kinetics400-id2label.json" + elif "ssv2" in model_name: + config.num_labels = 174 + filename = "something-something-v2-id2label.json" + else: + raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.") + id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = {int(k): v for k, v in id2label.items()} + config.id2label = id2label + config.label2id = {v: k for k, v in id2label.items()} + + return config + + +def rename_key(name): + if "encoder." in name: + name = name.replace("encoder.", "") + if "cls_token" in name: + name = name.replace("cls_token", "videomae.embeddings.cls_token") + if "decoder_pos_embed" in name: + name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed") + if "pos_embed" in name and "decoder" not in name: + name = name.replace("pos_embed", "videomae.embeddings.position_embeddings") + if "patch_embed.proj" in name: + name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection") + if "patch_embed.norm" in name: + name = name.replace("patch_embed.norm", "videomae.embeddings.norm") + if "decoder.blocks" in name: + name = name.replace("decoder.blocks", "decoder.decoder_layers") + if "blocks" in name: + name = name.replace("blocks", "videomae.encoder.layer") + if "attn.proj" in name: + name = name.replace("attn.proj", "attention.output.dense") + if "attn" in name and "bias" not in name: + name = name.replace("attn", "attention.self") + if "attn" in name: + name = name.replace("attn", "attention.attention") + if "norm1" in name: + name = name.replace("norm1", "layernorm_before") + if "norm2" in name: + name = name.replace("norm2", "layernorm_after") + if "mlp.fc1" in name: + name = name.replace("mlp.fc1", "intermediate.dense") + if "mlp.fc2" in name: + name = name.replace("mlp.fc2", "output.dense") + if "decoder_embed" in name: + name = name.replace("decoder_embed", "decoder.decoder_embed") + if "decoder_norm" in name: + name = name.replace("decoder_norm", "decoder.decoder_norm") + if "decoder_pred" in name: + name = name.replace("decoder_pred", "decoder.decoder_pred") + if "norm.weight" in name and "decoder" not in name and "fc" not in name: + name = name.replace("norm.weight", "videomae.layernorm.weight") + if "norm.bias" in name and "decoder" not in name and "fc" not in name: + name = name.replace("norm.bias", "videomae.layernorm.bias") + if "head" in name and "decoder" not in name: + name = name.replace("head", "classifier") + + return name + + +def convert_state_dict(orig_state_dict, config): + for key in orig_state_dict.copy().keys(): + val = orig_state_dict.pop(key) + + if key.startswith("encoder."): + key = key.replace("encoder.", "") + + if "qkv" in key: + key_split = key.split(".") + if key.startswith("decoder.blocks"): + dim = config.decoder_hidden_size + layer_num = int(key_split[2]) + prefix = "decoder.decoder_layers." + if "weight" in key: + orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :] + orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :] + orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :] + else: + dim = config.hidden_size + layer_num = int(key_split[1]) + prefix = "videomae.encoder.layer." + if "weight" in key: + orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :] + orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :] + orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :] + else: + orig_state_dict[rename_key(key)] = val + + return orig_state_dict + + +# We will verify our results on a video of eating spaghetti +# Frame indices used: [164 168 172 176 181 185 189 193 198 202 206 210 215 219 223 227] +def prepare_video(): + file = hf_hub_download(repo_id="datasets/hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy") + video = np.load(file) + return list(video) + + +def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub): + config = get_videomae_config(model_name) + + if "finetuned" in model_name: + model = VideoMAEForVideoClassification(config) + else: + model = VideoMAEForPreTraining(config) + + # download original checkpoint, hosted on Google Drive + output = "pytorch_model.bin" + gdown.cached_download(checkpoint_url, output, quiet=False) + files = torch.load(output, map_location="cpu") + if "model" in files: + state_dict = files["model"] + else: + state_dict = files["module"] + new_state_dict = convert_state_dict(state_dict, config) + + model.load_state_dict(new_state_dict) + model.eval() + + # verify model on basic input + feature_extractor = VideoMAEFeatureExtractor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]) + video = prepare_video() + inputs = feature_extractor(video, return_tensors="pt") + + if "finetuned" not in model_name: + local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt") + inputs["bool_masked_pos"] = torch.load(local_path) + + outputs = model(**inputs) + logits = outputs.logits + + model_names = [ + # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) + "videomae-base-short", + "videomae-base-short-finetuned-kinetics", + "videomae-base", + "videomae-base-finetuned-kinetics", + "videomae-large", + "videomae-large-finetuned-kinetics", + # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) + "videomae-base-short-ssv2", + "videomae-base-short-finetuned-ssv2", + "videomae-base-ssv2", + "videomae-base-finetuned-ssv2", + ] + + # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] + if model_name == "videomae-base": + expected_shape = torch.Size([1, 1408, 1536]) + expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]]) + elif model_name == "videomae-base-short": + expected_shape = torch.Size([1, 1408, 1536]) + expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]]) + # we verified the loss both for normalized and unnormalized targets for this one + expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469]) + elif model_name == "videomae-large": + expected_shape = torch.Size([1, 1408, 1536]) + expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]]) + elif model_name == "videomae-large-finetuned-kinetics": + expected_shape = torch.Size([1, 400]) + expected_slice = torch.tensor([0.0771, 0.0011, -0.3625]) + elif model_name == "videomae-base-short-finetuned-kinetics": + expected_shape = torch.Size([1, 400]) + expected_slice = torch.tensor([0.6588, 0.0990, -0.2493]) + elif model_name == "videomae-base-finetuned-kinetics": + expected_shape = torch.Size([1, 400]) + expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]) + elif model_name == "videomae-base-short-ssv2": + expected_shape = torch.Size([1, 1408, 1536]) + expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]]) + elif model_name == "videomae-base-short-finetuned-ssv2": + expected_shape = torch.Size([1, 174]) + expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266]) + elif model_name == "videomae-base-ssv2": + expected_shape = torch.Size([1, 1408, 1536]) + expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]]) + elif model_name == "videomae-base-finetuned-ssv2": + expected_shape = torch.Size([1, 174]) + expected_slice = torch.tensor([0.1961, -0.8337, -0.6389]) + else: + raise ValueError(f"Model name not supported. Should be one of {model_names}") + + # verify logits + assert logits.shape == expected_shape + if "finetuned" in model_name: + assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4) + else: + print("Logits:", logits[0, :3, :3]) + assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4) + print("Logits ok!") + + # verify loss, if applicable + if model_name == "videomae-base-short": + loss = outputs.loss + assert torch.allclose(loss, expected_loss, atol=1e-4) + print("Loss ok!") + + if pytorch_dump_folder_path is not None: + print(f"Saving model and feature extractor to {pytorch_dump_folder_path}") + feature_extractor.save_pretrained(pytorch_dump_folder_path) + model.save_pretrained(pytorch_dump_folder_path) + + if push_to_hub: + print("Pushing to the hub...") + model.push_to_hub(model_name, organization="nielsr") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--checkpoint_url", + default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4", + type=str, + help=( + "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct" + " download link." + ), + ) + parser.add_argument( + "--pytorch_dump_folder_path", + default="/Users/nielsrogge/Documents/VideoMAE/Test", + type=str, + help="Path to the output PyTorch model directory.", + ) + parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.") + parser.add_argument( + "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." + ) + + args = parser.parse_args() + convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub) diff --git a/src/transformers/models/videomae/feature_extraction_videomae.py b/src/transformers/models/videomae/feature_extraction_videomae.py new file mode 100644 index 00000000000000..132dabda8c6833 --- /dev/null +++ b/src/transformers/models/videomae/feature_extraction_videomae.py @@ -0,0 +1,169 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Feature extractor class for VideoMAE.""" + +from typing import Optional, Union + +import numpy as np +from PIL import Image + +from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin +from ...image_utils import ImageFeatureExtractionMixin, ImageInput, is_torch_tensor +from ...utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, TensorType, logging + + +logger = logging.get_logger(__name__) + + +class VideoMAEFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin): + r""" + Constructs a VideoMAE feature extractor. This feature extractor can be used to prepare videos for the model. + + This feature extractor inherits from [`FeatureExtractionMixin`] which contains most of the main methods. Users + should refer to this superclass for more information regarding those methods. + + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to resize the shorter edge of the input to a certain `size`. + size (`int`, *optional*, defaults to 224): + Resize the shorter edge of the input to the given size. Only has an effect if `do_resize` is set to `True`. + resample (`int`, *optional*, defaults to `PIL.Image.BILINEAR`): + An optional resampling filter. This can be one of `PIL.Image.NEAREST`, `PIL.Image.BOX`, + `PIL.Image.BILINEAR`, `PIL.Image.HAMMING`, `PIL.Image.BICUBIC` or `PIL.Image.LANCZOS`. Only has an effect + if `do_resize` is set to `True`. + do_center_crop (`bool`, *optional*, defaults to `True`): + Whether to center crop the input to a certain `size`. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether or not to normalize the input with mean and standard deviation. + image_mean (`List[int]`, defaults to `[0.485, 0.456, 0.406]`): + The sequence of means for each channel, to be used when normalizing images. + image_std (`List[int]`, defaults to `[0.229, 0.224, 0.225]`): + The sequence of standard deviations for each channel, to be used when normalizing images. + """ + + model_input_names = ["pixel_values"] + + def __init__( + self, + do_resize=True, + size=224, + resample=Image.BILINEAR, + do_center_crop=True, + do_normalize=True, + image_mean=None, + image_std=None, + **kwargs + ): + super().__init__(**kwargs) + self.do_resize = do_resize + self.size = size + self.resample = resample + self.do_center_crop = do_center_crop + self.do_normalize = do_normalize + self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN + self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD + + def resize_video(self, video, size, resample="bilinear"): + return [self.resize(frame, size, resample, default_to_square=False) for frame in video] + + def crop_video(self, video, size): + return [self.center_crop(frame, size) for frame in video] + + def normalize_video(self, video, mean, std): + # video can be a list of PIL images, list of NumPy arrays or list of PyTorch tensors + # first: convert to list of NumPy arrays + video = [self.to_numpy_array(frame) for frame in video] + + # second: stack to get (num_frames, num_channels, height, width) + video = np.stack(video, axis=0) + + # third: normalize + if not isinstance(mean, np.ndarray): + mean = np.array(mean).astype(video.dtype) + if not isinstance(std, np.ndarray): + std = np.array(std).astype(video.dtype) + + return (video - mean[None, :, None, None]) / std[None, :, None, None] + + def __call__( + self, videos: ImageInput, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs + ) -> BatchFeature: + """ + Main method to prepare for the model one or several video(s). + + + + NumPy arrays are converted to PIL images when resizing, so the most efficient is to pass PIL images. + + + + Args: + videos (`List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`, `List[List[PIL.Image.Image]]`, `List[List[np.ndarrray]]`,: + `List[List[torch.Tensor]]`): The video or batch of videos to be prepared. Each video should be a list + of frames, which can be either PIL images or NumPy arrays. In case of NumPy arrays/PyTorch tensors, + each frame should be of shape (H, W, C), where H and W are frame height and width, and C is a number of + channels. + + return_tensors (`str` or [`~utils.TensorType`], *optional*, defaults to `'np'`): + If set, will return tensors of a particular framework. Acceptable values are: + + - `'tf'`: Return TensorFlow `tf.constant` objects. + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return NumPy `np.ndarray` objects. + - `'jax'`: Return JAX `jnp.ndarray` objects. + + Returns: + [`BatchFeature`]: A [`BatchFeature`] with the following fields: + + - **pixel_values** -- Pixel values to be fed to a model, of shape (batch_size, num_channels, num_frames, + height, width). + """ + # Input type checking for clearer error + valid_videos = False + is_batched = False + + # Check that videos have a valid type + if isinstance(videos, (list, tuple)): + if isinstance(videos[0], (Image.Image, np.ndarray)) or is_torch_tensor(videos[0]): + valid_videos = True + elif isinstance(videos[0], (list, tuple)) and ( + isinstance(videos[0][0], (Image.Image, np.ndarray)) or is_torch_tensor(videos[0][0]) + ): + valid_videos = True + is_batched = True + + if not valid_videos: + raise ValueError( + "Videos must of type `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]` (single" + " example), `List[List[PIL.Image.Image]]`, `List[List[np.ndarray]]`, `List[List[torch.Tensor]]` (batch" + " of examples)." + ) + + if not is_batched: + videos = [videos] + + # transformations (resizing + center cropping + normalization) + if self.do_resize and self.size is not None: + videos = [self.resize_video(video, size=self.size, resample=self.resample) for video in videos] + if self.do_center_crop and self.size is not None: + videos = [self.crop_video(video, size=self.size) for video in videos] + if self.do_normalize: + videos = [self.normalize_video(video, mean=self.image_mean, std=self.image_std) for video in videos] + + # return as BatchFeature + data = {"pixel_values": videos} + encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) + + return encoded_inputs diff --git a/src/transformers/models/videomae/modeling_videomae.py b/src/transformers/models/videomae/modeling_videomae.py new file mode 100644 index 00000000000000..a807ed7208fccb --- /dev/null +++ b/src/transformers/models/videomae/modeling_videomae.py @@ -0,0 +1,1039 @@ +# coding=utf-8 +# Copyright 2022 Multimedia Computing Group, Nanjing University and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch VideoMAE (masked autoencoder) model.""" + + +import collections.abc +import math +from copy import deepcopy +from dataclasses import dataclass +from typing import Optional, Set, Tuple, Union + +import numpy as np +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN +from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer +from ...utils import ( + ModelOutput, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from ...utils.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD +from .configuration_videomae import VideoMAEConfig + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "VideoMAEConfig" +_CHECKPOINT_FOR_DOC = "MCG-NJU/videomae-base" + +VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "MCG-NJU/videomae-base", + # See all VideoMAE models at https://huggingface.co/models?filter=videomae +] + + +@dataclass +class VideoMAEDecoderOutput(ModelOutput): + """ + Class for VideoMAEDecoder's outputs, with potential hidden states and attentions. + + Args: + logits (`torch.FloatTensor` of shape `(batch_size, patch_size ** 2 * num_channels)`): + Pixel reconstruction logits. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer + plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in + the self-attention heads. + """ + + logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class VideoMAEForPreTrainingOutput(ModelOutput): + """ + Class for VideoMAEForPreTraining's outputs, with potential hidden states and attentions. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`): + Pixel reconstruction loss. + logits (`torch.FloatTensor` of shape `(batch_size, patch_size ** 2 * num_channels)`): + Pixel reconstruction logits. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer + plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in + the self-attention heads. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +# sin-cos position encoding +# https://github.com/jadore801120/attention-is-all-you-need-pytorch/blob/master/transformer/Models.py#L31 +def get_sinusoid_encoding_table(n_position, d_hid): + """Sinusoid position encoding table""" + # TODO: make it with torch instead of numpy + def get_position_angle_vec(position): + return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)] + + sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)]) + sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i + sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 + + return torch.FloatTensor(sinusoid_table).unsqueeze(0) + + +class VideoMAEEmbeddings(nn.Module): + """ + Construct the patch and position embeddings. + + """ + + def __init__(self, config): + super().__init__() + + self.patch_embeddings = VideoMAEPatchEmbeddings(config) + self.num_patches = self.patch_embeddings.num_patches + # fixed sin-cos embedding + self.position_embeddings = get_sinusoid_encoding_table(self.num_patches, config.hidden_size) + self.config = config + + def forward(self, pixel_values, bool_masked_pos): + # create patch embeddings + embeddings = self.patch_embeddings(pixel_values) + + # add position embeddings + embeddings = embeddings + self.position_embeddings.type_as(embeddings).to(embeddings.device).clone().detach() + + # only keep visible patches + # ~bool_masked_pos means visible + if bool_masked_pos is not None: + batch_size, _, num_channels = embeddings.shape + embeddings = embeddings[~bool_masked_pos] + embeddings = embeddings.reshape(batch_size, -1, num_channels) + + return embeddings + + +class VideoMAEPatchEmbeddings(nn.Module): + """ + Video to Patch Embedding. This module turns a batch of videos of shape (batch_size, num_frames, num_channels, + height, width) into a tensor of shape (batch_size, seq_len, hidden_size) to be consumed by a Transformer encoder. + + The seq_len (the number of patches) equals (number of frames // tubelet_size) * (height // patch_size) * (width // + patch_size). + + """ + + def __init__(self, config): + super().__init__() + + image_size = config.image_size + patch_size = config.patch_size + num_channels = config.num_channels + hidden_size = config.hidden_size + num_frames = config.num_frames + tubelet_size = config.tubelet_size + + image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) + patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) + self.image_size = image_size + self.patch_size = patch_size + self.tubelet_size = int(tubelet_size) + num_patches = ( + (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) * (num_frames // self.tubelet_size) + ) + self.num_channels = num_channels + self.num_patches = num_patches + self.projection = nn.Conv3d( + in_channels=num_channels, + out_channels=hidden_size, + kernel_size=(self.tubelet_size, patch_size[0], patch_size[1]), + stride=(self.tubelet_size, patch_size[0], patch_size[1]), + ) + + def forward(self, pixel_values): + batch_size, num_frames, num_channels, height, width = pixel_values.shape + if num_channels != self.num_channels: + raise ValueError( + "Make sure that the channel dimension of the pixel values match with the one set in the configuration." + ) + if height != self.image_size[0] or width != self.image_size[1]: + raise ValueError( + f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})." + ) + # permute to (batch_size, num_channels, num_frames, height, width) + pixel_values = pixel_values.permute(0, 2, 1, 3, 4) + embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2) + return embeddings + + +class VideoMAESelfAttention(nn.Module): + def __init__(self, config: VideoMAEConfig) -> None: + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + f"The hidden size {config.hidden_size,} is not a multiple of the number of attention " + f"heads {config.num_attention_heads}." + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=False) + self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False) + self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=False) + + if config.qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(self.all_head_size)) + self.v_bias = nn.Parameter(torch.zeros(self.all_head_size)) + else: + self.q_bias = None + self.v_bias = None + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + + def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False + ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: + + k_bias = torch.zeros_like(self.v_bias, requires_grad=False) if self.q_bias is not None else None + keys = nn.functional.linear(input=hidden_states, weight=self.key.weight, bias=k_bias) + values = nn.functional.linear(input=hidden_states, weight=self.value.weight, bias=self.v_bias) + queries = nn.functional.linear(input=hidden_states, weight=self.query.weight, bias=self.q_bias) + + key_layer = self.transpose_for_scores(keys) + value_layer = self.transpose_for_scores(values) + query_layer = self.transpose_for_scores(queries) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + + # Normalize the attention scores to probabilities. + attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + return outputs + + +# Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->VideoMAE +class VideoMAESelfOutput(nn.Module): + """ + The residual connection is defined in VideoMAELayer instead of here (as is the case with other models), due to the + layernorm applied before each block. + """ + + def __init__(self, config: VideoMAEConfig) -> None: + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + + return hidden_states + + +# Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->VideoMAE +class VideoMAEAttention(nn.Module): + def __init__(self, config: VideoMAEConfig) -> None: + super().__init__() + self.attention = VideoMAESelfAttention(config) + self.output = VideoMAESelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads: Set[int]) -> None: + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.attention.query = prune_linear_layer(self.attention.query, index) + self.attention.key = prune_linear_layer(self.attention.key, index) + self.attention.value = prune_linear_layer(self.attention.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) + self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states: torch.Tensor, + head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: + self_outputs = self.attention(hidden_states, head_mask, output_attentions) + + attention_output = self.output(self_outputs[0], hidden_states) + + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +# Copied from transformers.models.vit.modeling_vit.ViTIntermediate ViT->VideoMAE +class VideoMAEIntermediate(nn.Module): + def __init__(self, config: VideoMAEConfig) -> None: + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + + return hidden_states + + +# Copied from transformers.models.vit.modeling_vit.ViTOutput ViT->VideoMAE +class VideoMAEOutput(nn.Module): + def __init__(self, config: VideoMAEConfig) -> None: + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + + hidden_states = hidden_states + input_tensor + + return hidden_states + + +# Copied from transformers.models.vit.modeling_vit.ViTLayer with ViT->VideoMAE +class VideoMAELayer(nn.Module): + """This corresponds to the Block class in the timm implementation.""" + + def __init__(self, config: VideoMAEConfig) -> None: + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = VideoMAEAttention(config) + self.intermediate = VideoMAEIntermediate(config) + self.output = VideoMAEOutput(config) + self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: + self_attention_outputs = self.attention( + self.layernorm_before(hidden_states), # in VideoMAE, layernorm is applied before self-attention + head_mask, + output_attentions=output_attentions, + ) + attention_output = self_attention_outputs[0] + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + # first residual connection + hidden_states = attention_output + hidden_states + + # in VideoMAE, layernorm is also applied after self-attention + layer_output = self.layernorm_after(hidden_states) + layer_output = self.intermediate(layer_output) + + # second residual connection is done here + layer_output = self.output(layer_output, hidden_states) + + outputs = (layer_output,) + outputs + + return outputs + + +# Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->VideoMAE +class VideoMAEEncoder(nn.Module): + def __init__(self, config: VideoMAEConfig) -> None: + super().__init__() + self.config = config + self.layer = nn.ModuleList([VideoMAELayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ) -> Union[tuple, BaseModelOutput]: + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + layer_head_mask, + ) + else: + layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + ) + + +class VideoMAEPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = VideoMAEConfig + base_model_prefix = "videomae" + main_input_name = "pixel_values" + supports_gradient_checkpointing = True + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, (nn.Linear, nn.Conv3d)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, VideoMAEEncoder): + module.gradient_checkpointing = value + + +VIDEOMAE_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it + as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`VideoMAEConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +VIDEOMAE_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`VideoMAEFeatureExtractor`]. See + [`VideoMAEFeatureExtractor.__call__`] for details. + + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare VideoMAE Model transformer outputting raw hidden-states without any specific head on top.", + VIDEOMAE_START_DOCSTRING, +) +class VideoMAEModel(VideoMAEPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.config = config + + self.embeddings = VideoMAEEmbeddings(config) + self.encoder = VideoMAEEncoder(config) + + if config.use_mean_pooling: + self.layernorm = None + else: + self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.patch_embeddings + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(VIDEOMAE_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values, + bool_masked_pos=None, + head_mask=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Returns: + + Examples: + + ```python + >>> from decord import VideoReader, cpu + >>> import numpy as np + + >>> from transformers import VideoMAEFeatureExtractor, VideoMAEModel + >>> from huggingface_hub import hf_hub_download + + + >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len): + ... converted_len = int(clip_len * frame_sample_rate) + ... end_idx = np.random.randint(converted_len, seg_len) + ... start_idx = end_idx - converted_len + ... indices = np.linspace(start_idx, end_idx, num=clip_len) + ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64) + ... return indices + + + >>> # video clip consists of 300 frames (10 seconds at 30 FPS) + >>> file_path = hf_hub_download( + ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset" + ... ) + >>> vr = VideoReader(file_path, num_threads=1, ctx=cpu(0)) + + >>> # sample 16 frames + >>> vr.seek(0) + >>> indices = sample_frame_indices(clip_len=16, frame_sample_rate=4, seg_len=len(vr)) + >>> buffer = vr.get_batch(indices).asnumpy() + + >>> # create a list of NumPy arrays + >>> video = [buffer[i] for i in range(buffer.shape[0])] + + >>> feature_extractor = VideoMAEFeatureExtractor.from_pretrained("MCG-NJU/videomae-base") + >>> model = VideoMAEModel.from_pretrained("MCG-NJU/videomae-base") + + >>> # prepare video for the model + >>> inputs = feature_extractor(video, return_tensors="pt") + + >>> # forward pass + >>> outputs = model(**inputs) + >>> last_hidden_states = outputs.last_hidden_state + >>> list(last_hidden_states.shape) + [1, 1568, 768] + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + embedding_output = self.embeddings(pixel_values, bool_masked_pos) + + encoder_outputs = self.encoder( + embedding_output, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = encoder_outputs[0] + if self.layernorm is not None: + sequence_output = self.layernorm(sequence_output) + + if not return_dict: + return (sequence_output,) + encoder_outputs[1:] + + return BaseModelOutput( + last_hidden_state=sequence_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + +class VideoMAEDecoder(nn.Module): + def __init__(self, config, num_patches): + super().__init__() + + decoder_num_labels = config.num_channels * config.tubelet_size * config.patch_size**2 + + decoder_config = deepcopy(config) + decoder_config.hidden_size = config.decoder_hidden_size + decoder_config.num_hidden_layers = config.decoder_num_hidden_layers + decoder_config.num_attention_heads = config.decoder_num_attention_heads + decoder_config.intermediate_size = config.decoder_intermediate_size + self.decoder_layers = nn.ModuleList( + [VideoMAELayer(decoder_config) for _ in range(config.decoder_num_hidden_layers)] + ) + + self.norm = nn.LayerNorm(config.decoder_hidden_size) + self.head = ( + nn.Linear(config.decoder_hidden_size, decoder_num_labels) if decoder_num_labels > 0 else nn.Identity() + ) + + self.gradient_checkpointing = False + self.config = config + + def forward( + self, + hidden_states, + return_token_num, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + ): + # apply Transformer layers (blocks) + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + for i, layer_module in enumerate(self.decoder_layers): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + None, + ) + else: + layer_outputs = layer_module(hidden_states, head_mask=None, output_attentions=output_attentions) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if return_token_num > 0: + hidden_states = hidden_states[:, -return_token_num:] + + # predictor projection + hidden_states = self.norm(hidden_states) + logits = self.head(hidden_states) + + if not return_dict: + return tuple(v for v in [logits, all_hidden_states, all_self_attentions] if v is not None) + return VideoMAEDecoderOutput(logits=logits, hidden_states=all_hidden_states, attentions=all_self_attentions) + + +@add_start_docstrings( + "The VideoMAE Model transformer with the decoder on top for self-supervised pre-training.", + VIDEOMAE_START_DOCSTRING, +) +class VideoMAEForPreTraining(VideoMAEPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.config = config + + self.videomae = VideoMAEModel(config) + + self.encoder_to_decoder = nn.Linear(config.hidden_size, config.decoder_hidden_size, bias=False) + self.mask_token = nn.Parameter(torch.zeros(1, 1, config.decoder_hidden_size)) + self.position_embeddings = get_sinusoid_encoding_table( + self.videomae.embeddings.num_patches, config.decoder_hidden_size + ) + + self.decoder = VideoMAEDecoder(config, num_patches=self.videomae.embeddings.num_patches) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(VIDEOMAE_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=VideoMAEForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values, + bool_masked_pos, + head_mask=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Returns: + + Examples: + ```python + >>> from transformers import VideoMAEFeatureExtractor, VideoMAEForPreTraining + >>> import numpy as np + >>> import torch + + >>> num_frames = 16 + >>> video = list(np.random.randn(16, 3, 224, 224)) + + >>> feature_extractor = VideoMAEFeatureExtractor.from_pretrained("MCG-NJU/videomae-base") + >>> model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base") + + >>> pixel_values = feature_extractor(video, return_tensors="pt").pixel_values + + >>> num_patches_per_frame = (model.config.image_size // model.config.patch_size) ** 2 + >>> seq_length = (num_frames // model.config.tubelet_size) * num_patches_per_frame + >>> bool_masked_pos = torch.randint(0, 2, (1, seq_length)).bool() + + >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos) + >>> loss = outputs.loss + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.videomae( + pixel_values, + bool_masked_pos=bool_masked_pos, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + sequence_output = self.encoder_to_decoder( + sequence_output + ) # [batch_size, num_visible_patches, decoder_hidden_size] + batch_size, seq_len, num_channels = sequence_output.shape + + # we don't unshuffle the correct visible token order, but shuffle the position embeddings accordingly. + if bool_masked_pos is None: + raise ValueError("One must provided a boolean mask ") + expanded_position_embeddings = self.position_embeddings.expand(batch_size, -1, -1).type_as(pixel_values) + expanded_position_embeddings = expanded_position_embeddings.to(pixel_values.device).clone().detach() + pos_emb_visible = expanded_position_embeddings[~bool_masked_pos].reshape(batch_size, -1, num_channels) + pos_emb_mask = expanded_position_embeddings[bool_masked_pos].reshape(batch_size, -1, num_channels) + + # [batch_size, num_patches, decoder_hidden_size] + x_full = torch.cat([sequence_output + pos_emb_visible, self.mask_token + pos_emb_mask], dim=1) + + # [batch_size, num_masked_patches, num_channels * patch_size * patch_size] + decoder_outputs = self.decoder(x_full, pos_emb_mask.shape[1]) + logits = decoder_outputs.logits + + loss = None + with torch.no_grad(): + # calculate the labels to be predicted + # first, unnormalize the frames + device = pixel_values.device + mean = torch.as_tensor(IMAGENET_DEFAULT_MEAN).to(device)[None, None, :, None, None] + std = torch.as_tensor(IMAGENET_DEFAULT_STD).to(device)[None, None, :, None, None] + frames = pixel_values * std + mean # in [0, 1] + + batch_size, time, num_channels, height, width = frames.shape + tubelet_size, patch_size = self.config.tubelet_size, self.config.patch_size + if self.config.norm_pix_loss: + # step 1: split up dimensions (time by tubelet_size, height by patch_size, width by patch_size) + frames = frames.view( + batch_size, + time // tubelet_size, + tubelet_size, + num_channels, + height // patch_size, + patch_size, + width // patch_size, + patch_size, + ) + # step 2: move dimensions to concatenate: + frames = frames.permute(0, 1, 4, 6, 2, 5, 7, 3).contiguous() + # step 3: concatenate: + frames = frames.view( + batch_size, + time // tubelet_size * height // patch_size * width // patch_size, + tubelet_size * patch_size * patch_size, + num_channels, + ) + # step 4: normalize. The authors find that the mean is about 0.48 and standard deviation is about 0.08. + frames_norm = (frames - frames.mean(dim=-2, keepdim=True)) / ( + frames.var(dim=-2, unbiased=True, keepdim=True).sqrt() + 1e-6 + ) + # step 5: reshape to (batch_size, T//ts * H//ps * W//ps, ts * ps * ps * C) + videos_patch = frames_norm.view( + batch_size, + time // tubelet_size * height // patch_size * width // patch_size, + tubelet_size * patch_size * patch_size * num_channels, + ) + else: + # step 1: split up dimensions (time by tubelet_size, height by patch_size, width by patch_size) + frames = frames.view( + batch_size, + time // tubelet_size, + tubelet_size, + num_channels, + height // patch_size, + patch_size, + width // patch_size, + patch_size, + ) + # step 2: move dimensions to concatenate: (batch_size, T//ts, H//ps, W//ps, ts, ps, ps, C) + frames = frames.permute(0, 1, 4, 6, 2, 5, 7, 3).contiguous() + # step 3: concatenate + videos_patch = frames.view( + batch_size, + time // tubelet_size * height // patch_size * width // patch_size, + tubelet_size * patch_size * patch_size * num_channels, + ) + + batch_size, _, num_channels = videos_patch.shape + labels = videos_patch[bool_masked_pos].reshape(batch_size, -1, num_channels) + + loss_fct = MSELoss() + loss = loss_fct(logits, labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return VideoMAEForPreTrainingOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """VideoMAE Model transformer with a video classification head on top (a linear layer on top of the final hidden state of + the [CLS] token) e.g. for ImageNet.""", + VIDEOMAE_START_DOCSTRING, +) +class VideoMAEForVideoClassification(VideoMAEPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.num_labels = config.num_labels + self.videomae = VideoMAEModel(config) + + # Classifier head + self.fc_norm = nn.LayerNorm(config.hidden_size) if config.use_mean_pooling else None + self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(VIDEOMAE_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the image classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + + Returns: + + Examples: + + ```python + >>> from decord import VideoReader, cpu + >>> import torch + + >>> from transformers import VideoMAEFeatureExtractor, VideoMAEForVideoClassification + >>> from huggingface_hub import hf_hub_download + + + >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len): + ... converted_len = int(clip_len * frame_sample_rate) + ... end_idx = np.random.randint(converted_len, seg_len) + ... start_idx = end_idx - converted_len + ... indices = np.linspace(start_idx, end_idx, num=clip_len) + ... indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64) + ... return indices + + + >>> # video clip consists of 300 frames (10 seconds at 30 FPS) + >>> file_path = hf_hub_download( + ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset" + ... ) + >>> vr = VideoReader(file_path, num_threads=1, ctx=cpu(0)) + + >>> # sample 16 frames + >>> vr.seek(0) + >>> indices = sample_frame_indices(clip_len=16, frame_sample_rate=4, seg_len=len(vr)) + >>> buffer = vr.get_batch(indices).asnumpy() + + >>> # create a list of NumPy arrays + >>> video = [buffer[i] for i in range(buffer.shape[0])] + + >>> feature_extractor = VideoMAEFeatureExtractor.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics") + >>> model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics") + + >>> inputs = feature_extractor(video, return_tensors="pt") + + >>> with torch.no_grad(): + ... outputs = model(**inputs) + ... logits = outputs.logits + + >>> # model predicts one of the 400 Kinetics-400 classes + >>> predicted_label = logits.argmax(-1).item() + >>> print(model.config.id2label[predicted_label]) + eating spaghetti + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.videomae( + pixel_values, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + if self.fc_norm is not None: + sequence_output = self.fc_norm(sequence_output.mean(1)) + else: + sequence_output = sequence_output[:, 0] + + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return ImageClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/src/transformers/utils/__init__.py b/src/transformers/utils/__init__.py index 2334c351cc5120..377932e2d490e7 100644 --- a/src/transformers/utils/__init__.py +++ b/src/transformers/utils/__init__.py @@ -22,6 +22,7 @@ from packaging import version from .. import __version__ +from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD from .doc import ( add_code_sample_docstrings, add_end_docstrings, diff --git a/src/transformers/utils/constants.py b/src/transformers/utils/constants.py new file mode 100644 index 00000000000000..af2e48ab0a8be3 --- /dev/null +++ b/src/transformers/utils/constants.py @@ -0,0 +1,4 @@ +IMAGENET_DEFAULT_MEAN = [0.485, 0.456, 0.406] +IMAGENET_DEFAULT_STD = [0.229, 0.224, 0.225] +IMAGENET_STANDARD_MEAN = [0.5, 0.5, 0.5] +IMAGENET_STANDARD_STD = [0.5, 0.5, 0.5] diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index c1dfc6b6b7ca64..d636be655af284 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -406,6 +406,9 @@ def load_tf_weights_in_albert(*args, **kwargs): MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None +MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING = None + + MODEL_FOR_VISION_2_SEQ_MAPPING = None @@ -572,6 +575,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class AutoModelForVideoClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class AutoModelForVision2Seq(metaclass=DummyObject): _backends = ["torch"] @@ -4753,6 +4763,37 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class VideoMAEForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VideoMAEForVideoClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VideoMAEModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class VideoMAEPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + VILT_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/src/transformers/utils/dummy_vision_objects.py b/src/transformers/utils/dummy_vision_objects.py index e5d2bced9e0415..30228e022222bf 100644 --- a/src/transformers/utils/dummy_vision_objects.py +++ b/src/transformers/utils/dummy_vision_objects.py @@ -150,6 +150,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) +class VideoMAEFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + class ViltFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] diff --git a/tests/models/videomae/__init__.py b/tests/models/videomae/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/tests/models/videomae/test_feature_extraction_videomae.py b/tests/models/videomae/test_feature_extraction_videomae.py new file mode 100644 index 00000000000000..cfe00f51e5e529 --- /dev/null +++ b/tests/models/videomae/test_feature_extraction_videomae.py @@ -0,0 +1,202 @@ +# coding=utf-8 +# Copyright 2022 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import unittest + +import numpy as np + +from transformers.testing_utils import require_torch, require_vision +from transformers.utils import is_torch_available, is_vision_available + +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin, prepare_video_inputs + + +if is_torch_available(): + import torch + +if is_vision_available(): + from PIL import Image + + from transformers import VideoMAEFeatureExtractor + + +class VideoMAEFeatureExtractionTester(unittest.TestCase): + def __init__( + self, + parent, + batch_size=7, + num_channels=3, + num_frames=10, + image_size=18, + min_resolution=30, + max_resolution=400, + do_resize=True, + size=18, + do_normalize=True, + image_mean=[0.5, 0.5, 0.5], + image_std=[0.5, 0.5, 0.5], + ): + self.parent = parent + self.batch_size = batch_size + self.num_channels = num_channels + self.num_frames = num_frames + self.image_size = image_size + self.min_resolution = min_resolution + self.max_resolution = max_resolution + self.do_resize = do_resize + self.size = size + self.do_normalize = do_normalize + self.image_mean = image_mean + self.image_std = image_std + + def prepare_feat_extract_dict(self): + return { + "image_mean": self.image_mean, + "image_std": self.image_std, + "do_normalize": self.do_normalize, + "do_resize": self.do_resize, + "size": self.size, + } + + +@require_torch +@require_vision +class VideoMAEFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): + + feature_extraction_class = VideoMAEFeatureExtractor if is_vision_available() else None + + def setUp(self): + self.feature_extract_tester = VideoMAEFeatureExtractionTester(self) + + @property + def feat_extract_dict(self): + return self.feature_extract_tester.prepare_feat_extract_dict() + + def test_feat_extract_properties(self): + feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + self.assertTrue(hasattr(feature_extractor, "image_mean")) + self.assertTrue(hasattr(feature_extractor, "image_std")) + self.assertTrue(hasattr(feature_extractor, "do_normalize")) + self.assertTrue(hasattr(feature_extractor, "do_resize")) + self.assertTrue(hasattr(feature_extractor, "size")) + + def test_batch_feature(self): + pass + + def test_call_pil(self): + # Initialize feature_extractor + feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # create random PIL videos + video_inputs = prepare_video_inputs(self.feature_extract_tester, equal_resolution=False) + for video in video_inputs: + self.assertIsInstance(video, list) + self.assertIsInstance(video[0], Image.Image) + + # Test not batched input + encoded_videos = feature_extractor(video_inputs[0], return_tensors="pt").pixel_values + self.assertEqual( + encoded_videos.shape, + ( + 1, + self.feature_extract_tester.num_frames, + self.feature_extract_tester.num_channels, + self.feature_extract_tester.size, + self.feature_extract_tester.size, + ), + ) + + # Test batched + encoded_videos = feature_extractor(video_inputs, return_tensors="pt").pixel_values + self.assertEqual( + encoded_videos.shape, + ( + self.feature_extract_tester.batch_size, + self.feature_extract_tester.num_frames, + self.feature_extract_tester.num_channels, + self.feature_extract_tester.size, + self.feature_extract_tester.size, + ), + ) + + def test_call_numpy(self): + # Initialize feature_extractor + feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # create random numpy tensors + video_inputs = prepare_video_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + for video in video_inputs: + self.assertIsInstance(video, list) + self.assertIsInstance(video[0], np.ndarray) + + # Test not batched input + encoded_videos = feature_extractor(video_inputs[0], return_tensors="pt").pixel_values + self.assertEqual( + encoded_videos.shape, + ( + 1, + self.feature_extract_tester.num_frames, + self.feature_extract_tester.num_channels, + self.feature_extract_tester.size, + self.feature_extract_tester.size, + ), + ) + + # Test batched + encoded_videos = feature_extractor(video_inputs, return_tensors="pt").pixel_values + self.assertEqual( + encoded_videos.shape, + ( + self.feature_extract_tester.batch_size, + self.feature_extract_tester.num_frames, + self.feature_extract_tester.num_channels, + self.feature_extract_tester.size, + self.feature_extract_tester.size, + ), + ) + + def test_call_pytorch(self): + # Initialize feature_extractor + feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # create random PyTorch tensors + video_inputs = prepare_video_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + for video in video_inputs: + self.assertIsInstance(video, list) + self.assertIsInstance(video[0], torch.Tensor) + + # Test not batched input + encoded_videos = feature_extractor(video_inputs[0], return_tensors="pt").pixel_values + self.assertEqual( + encoded_videos.shape, + ( + 1, + self.feature_extract_tester.num_frames, + self.feature_extract_tester.num_channels, + self.feature_extract_tester.size, + self.feature_extract_tester.size, + ), + ) + + # Test batched + encoded_videos = feature_extractor(video_inputs, return_tensors="pt").pixel_values + self.assertEqual( + encoded_videos.shape, + ( + self.feature_extract_tester.batch_size, + self.feature_extract_tester.num_frames, + self.feature_extract_tester.num_channels, + self.feature_extract_tester.size, + self.feature_extract_tester.size, + ), + ) diff --git a/tests/models/videomae/test_modeling_videomae.py b/tests/models/videomae/test_modeling_videomae.py new file mode 100644 index 00000000000000..adce62021c9ded --- /dev/null +++ b/tests/models/videomae/test_modeling_videomae.py @@ -0,0 +1,421 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Testing suite for the PyTorch VideoMAE model. """ + + +import copy +import inspect +import unittest + +import numpy as np + +from huggingface_hub import hf_hub_download +from transformers import VideoMAEConfig +from transformers.models.auto import get_values +from transformers.testing_utils import require_torch, require_vision, slow, torch_device +from transformers.utils import cached_property, is_torch_available, is_vision_available + +from ...test_configuration_common import ConfigTester +from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor + + +if is_torch_available(): + import torch + from torch import nn + + from transformers import ( + MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, + VideoMAEForPreTraining, + VideoMAEForVideoClassification, + VideoMAEModel, + ) + from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST + + +if is_vision_available(): + from transformers import VideoMAEFeatureExtractor + + +class VideoMAEModelTester: + def __init__( + self, + parent, + batch_size=13, + image_size=10, + num_channels=3, + patch_size=2, + tubelet_size=2, + num_frames=2, + is_training=True, + use_labels=True, + hidden_size=32, + num_hidden_layers=5, + num_attention_heads=4, + intermediate_size=37, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + type_sequence_label_size=10, + initializer_range=0.02, + mask_ratio=0.9, + scope=None, + ): + self.parent = parent + self.batch_size = batch_size + self.image_size = image_size + self.num_channels = num_channels + self.patch_size = patch_size + self.tubelet_size = tubelet_size + self.num_frames = num_frames + self.is_training = is_training + self.use_labels = use_labels + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.type_sequence_label_size = type_sequence_label_size + self.initializer_range = initializer_range + self.mask_ratio = mask_ratio + self.scope = scope + + # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame + self.num_patches_per_frame = (image_size // patch_size) ** 2 + self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame + + # use this variable to define bool_masked_pos + self.num_masks = int(mask_ratio * self.seq_length) + + def prepare_config_and_inputs(self): + pixel_values = floats_tensor( + [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] + ) + + labels = None + if self.use_labels: + labels = ids_tensor([self.batch_size], self.type_sequence_label_size) + + config = self.get_config() + + return config, pixel_values, labels + + def get_config(self): + return VideoMAEConfig( + image_size=self.image_size, + patch_size=self.patch_size, + num_channels=self.num_channels, + num_frames=self.num_frames, + tubelet_size=self.tubelet_size, + hidden_size=self.hidden_size, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + intermediate_size=self.intermediate_size, + hidden_act=self.hidden_act, + hidden_dropout_prob=self.hidden_dropout_prob, + attention_probs_dropout_prob=self.attention_probs_dropout_prob, + is_decoder=False, + initializer_range=self.initializer_range, + ) + + def create_and_check_model(self, config, pixel_values, labels): + model = VideoMAEModel(config=config) + model.to(torch_device) + model.eval() + result = model(pixel_values) + self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) + + def create_and_check_for_pretraining(self, config, pixel_values, labels): + model = VideoMAEForPreTraining(config) + model.to(torch_device) + model.eval() + # important: each video needs to have the same number of masked patches + # hence we define a single mask, which we then repeat for each example in the batch + mask = torch.ones((self.num_masks,)) + mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))]) + bool_masked_pos = mask.expand(self.batch_size, -1).bool() + + result = model(pixel_values, bool_masked_pos) + # model only returns predictions for masked patches + num_masked_patches = mask.sum().item() + decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2 + self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels)) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + config, pixel_values, labels = config_and_inputs + inputs_dict = {"pixel_values": pixel_values} + return config, inputs_dict + + +@require_torch +class VideoMAEModelTest(ModelTesterMixin, unittest.TestCase): + """ + Here we also overwrite some of the tests of test_modeling_common.py, as VideoMAE does not use input_ids, inputs_embeds, + attention_mask and seq_length. + """ + + all_model_classes = ( + (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () + ) + + test_pruning = False + test_torchscript = False + test_resize_embeddings = False + test_head_masking = False + + def setUp(self): + self.model_tester = VideoMAEModelTester(self) + self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37) + + def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): + inputs_dict = copy.deepcopy(inputs_dict) + + if model_class == VideoMAEForPreTraining: + # important: each video needs to have the same number of masked patches + # hence we define a single mask, which we then repeat for each example in the batch + mask = torch.ones((self.model_tester.num_masks,)) + mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))]) + bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool() + inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device) + + if return_labels: + if model_class in [ + *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING), + ]: + inputs_dict["labels"] = torch.zeros( + self.model_tester.batch_size, dtype=torch.long, device=torch_device + ) + + return inputs_dict + + def test_config(self): + self.config_tester.run_common_tests() + + @unittest.skip(reason="VideoMAE does not use inputs_embeds") + def test_inputs_embeds(self): + pass + + def test_model_common_attributes(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) + x = model.get_output_embeddings() + self.assertTrue(x is None or isinstance(x, nn.Linear)) + + def test_forward_signature(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + signature = inspect.signature(model.forward) + # signature.parameters is an OrderedDict => so arg_names order is deterministic + arg_names = [*signature.parameters.keys()] + + expected_arg_names = ["pixel_values"] + self.assertListEqual(arg_names[:1], expected_arg_names) + + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + def test_for_pretraining(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_for_pretraining(*config_and_inputs) + + @slow + def test_model_from_pretrained(self): + for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: + model = VideoMAEModel.from_pretrained(model_name) + self.assertIsNotNone(model) + + def test_attention_outputs(self): + if not self.has_attentions: + pass + + else: + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + config.return_dict = True + + for model_class in self.all_model_classes: + num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks + seq_len = ( + num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length + ) + + inputs_dict["output_attentions"] = True + inputs_dict["output_hidden_states"] = False + config.return_dict = True + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + attentions = outputs.attentions + self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) + + # check that output_attentions also work using config + del inputs_dict["output_attentions"] + config.output_attentions = True + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + attentions = outputs.attentions + self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) + + self.assertListEqual( + list(attentions[0].shape[-3:]), + [self.model_tester.num_attention_heads, seq_len, seq_len], + ) + out_len = len(outputs) + + # Check attention is always last and order is fine + inputs_dict["output_attentions"] = True + inputs_dict["output_hidden_states"] = True + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + self.assertEqual(out_len + 1, len(outputs)) + + self_attentions = outputs.attentions + + self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) + self.assertListEqual( + list(self_attentions[0].shape[-3:]), + [self.model_tester.num_attention_heads, seq_len, seq_len], + ) + + def test_hidden_states_output(self): + def check_hidden_states_output(inputs_dict, config, model_class): + model = model_class(config) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + hidden_states = outputs.hidden_states + expected_num_layers = self.model_tester.num_hidden_layers + 1 + self.assertEqual(len(hidden_states), expected_num_layers) + + num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks + seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length + + self.assertListEqual( + list(hidden_states[0].shape[-2:]), + [seq_length, self.model_tester.hidden_size], + ) + + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + inputs_dict["output_hidden_states"] = True + check_hidden_states_output(inputs_dict, config, model_class) + + # check that output_hidden_states also work using config + del inputs_dict["output_hidden_states"] + config.output_hidden_states = True + + check_hidden_states_output(inputs_dict, config, model_class) + + +# We will verify our results on a video of eating spaghetti +# Frame indices used: [164 168 172 176 181 185 189 193 198 202 206 210 215 219 223 227] +def prepare_video(): + file = hf_hub_download(repo_id="datasets/hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy") + video = np.load(file) + return list(video) + + +@require_torch +@require_vision +class VideoMAEModelIntegrationTest(unittest.TestCase): + @cached_property + def default_feature_extractor(self): + # logits were tested with a different mean and std, so we use the same here + return ( + VideoMAEFeatureExtractor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]) + if is_vision_available() + else None + ) + + @slow + def test_inference_for_video_classification(self): + model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to( + torch_device + ) + + feature_extractor = self.default_feature_extractor + video = prepare_video() + inputs = feature_extractor(video, return_tensors="pt").to(torch_device) + + # forward pass + with torch.no_grad(): + outputs = model(**inputs) + + # verify the logits + expected_shape = torch.Size((1, 400)) + self.assertEqual(outputs.logits.shape, expected_shape) + + expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device) + + self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) + + @slow + def test_inference_for_pretraining(self): + model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device) + + feature_extractor = self.default_feature_extractor + video = prepare_video() + inputs = feature_extractor(video, return_tensors="pt").to(torch_device) + + # add boolean mask, indicating which patches to mask + local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt") + inputs["bool_masked_pos"] = torch.load(local_path) + + # forward pass + with torch.no_grad(): + outputs = model(**inputs) + + # verify the logits + expected_shape = torch.Size([1, 1408, 1536]) + expected_slice = torch.tensor( + [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device + ) + self.assertEqual(outputs.logits.shape, expected_shape) + self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)) + + # verify the loss (`config.norm_pix_loss` = `True`) + expected_loss = torch.tensor([0.5142], device=torch_device) + self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4)) + + # verify the loss (`config.norm_pix_loss` = `False`) + model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to( + torch_device + ) + + with torch.no_grad(): + outputs = model(**inputs) + + expected_loss = torch.tensor(torch.tensor([0.6469]), device=torch_device) + self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4)) diff --git a/tests/test_feature_extraction_common.py b/tests/test_feature_extraction_common.py index 16ab3c6459544c..a822b75cc5eb62 100644 --- a/tests/test_feature_extraction_common.py +++ b/tests/test_feature_extraction_common.py @@ -48,49 +48,91 @@ def prepare_image_inputs(feature_extract_tester, equal_resolution=False, numpify=False, torchify=False): """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True, or a list of PyTorch tensors if one specifies torchify=True. + + One can specify whether the images are of the same resolution or not. """ assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" - if equal_resolution: - image_inputs = [] - for i in range(feature_extract_tester.batch_size): - image_inputs.append( - np.random.randint( - 255, - size=( - feature_extract_tester.num_channels, - feature_extract_tester.max_resolution, - feature_extract_tester.max_resolution, - ), - dtype=np.uint8, - ) - ) - else: - image_inputs = [] - - # To avoid getting image width/height 0 - min_resolution = feature_extract_tester.min_resolution - if getattr(feature_extract_tester, "size_divisor", None): - # If `size_divisor` is defined, the image needs to have width/size >= `size_divisor` - min_resolution = max(feature_extract_tester.size_divisor, min_resolution) - - for i in range(feature_extract_tester.batch_size): + image_inputs = [] + for i in range(feature_extract_tester.batch_size): + if equal_resolution: + width = height = feature_extract_tester.max_resolution + else: + # To avoid getting image width/height 0 + min_resolution = feature_extract_tester.min_resolution + if getattr(feature_extract_tester, "size_divisor", None): + # If `size_divisor` is defined, the image needs to have width/size >= `size_divisor` + min_resolution = max(feature_extract_tester.size_divisor, min_resolution) width, height = np.random.choice(np.arange(min_resolution, feature_extract_tester.max_resolution), 2) - image_inputs.append( - np.random.randint(255, size=(feature_extract_tester.num_channels, width, height), dtype=np.uint8) + image_inputs.append( + np.random.randint( + 255, + size=( + feature_extract_tester.num_channels, + width, + height, + ), + dtype=np.uint8, ) + ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension - image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs] + image_inputs = [Image.fromarray(np.moveaxis(image, 0, -1)) for image in image_inputs] if torchify: - image_inputs = [torch.from_numpy(x) for x in image_inputs] + image_inputs = [torch.from_numpy(image) for image in image_inputs] return image_inputs +def prepare_video(feature_extract_tester, width=10, height=10, numpify=False, torchify=False): + """This function prepares a video as a list of PIL images/NumPy arrays/PyTorch tensors.""" + + video = [] + for i in range(feature_extract_tester.num_frames): + video.append(np.random.randint(255, size=(feature_extract_tester.num_channels, width, height), dtype=np.uint8)) + + if not numpify and not torchify: + # PIL expects the channel dimension as last dimension + video = [Image.fromarray(np.moveaxis(frame, 0, -1)) for frame in video] + + if torchify: + video = [torch.from_numpy(frame) for frame in video] + + return video + + +def prepare_video_inputs(feature_extract_tester, equal_resolution=False, numpify=False, torchify=False): + """This function prepares a batch of videos: a list of list of PIL images, or a list of list of numpy arrays if + one specifies numpify=True, or a list of list of PyTorch tensors if one specifies torchify=True. + + One can specify whether the videos are of the same resolution or not. + """ + + assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" + + video_inputs = [] + for i in range(feature_extract_tester.batch_size): + if equal_resolution: + width = height = feature_extract_tester.max_resolution + else: + width, height = np.random.choice( + np.arange(feature_extract_tester.min_resolution, feature_extract_tester.max_resolution), 2 + ) + video = prepare_video( + feature_extract_tester=feature_extract_tester, + width=width, + height=height, + numpify=numpify, + torchify=torchify, + ) + video_inputs.append(video) + + return video_inputs + + class FeatureExtractionSavingTestMixin: def test_feat_extract_to_json_string(self): feat_extract = self.feature_extraction_class(**self.feat_extract_dict) diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index a86b33e88ff2a0..c05771336e6365 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -99,6 +99,7 @@ MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, + MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, MODEL_MAPPING, AdaptiveEmbedding, AutoModelForCausalLM, @@ -182,6 +183,7 @@ def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING), *get_values(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING), *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING), + *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING), ]: inputs_dict["labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device From d2704c4143d5733a3588c6cd648d210c8e274b6b Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Thu, 4 Aug 2022 18:52:01 +0200 Subject: [PATCH 015/539] Add machine type in the artifact of Examples directory job (#18459) Co-authored-by: ydshieh --- .github/workflows/self-scheduled.yml | 8 ++++---- utils/notification_service.py | 7 ++++++- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/.github/workflows/self-scheduled.yml b/.github/workflows/self-scheduled.yml index c1a4f3fd371d66..323ca5eb54db23 100644 --- a/.github/workflows/self-scheduled.yml +++ b/.github/workflows/self-scheduled.yml @@ -187,19 +187,19 @@ jobs: working-directory: /transformers run: | pip install -r examples/pytorch/_tests_requirements.txt - python3 -m pytest -v --make-reports=examples_gpu examples/pytorch + python3 -m pytest -v --make-reports=single-gpu_examples_gpu examples/pytorch - name: Failure short reports if: ${{ failure() }} continue-on-error: true - run: cat /transformers/reports/examples_gpu/failures_short.txt + run: cat /transformers/reports/single-gpu_examples_gpu/failures_short.txt - name: Test suite reports artifacts if: ${{ always() }} uses: actions/upload-artifact@v2 with: - name: run_examples_gpu - path: /transformers/reports/examples_gpu + name: single-gpu_run_examples_gpu + path: /transformers/reports/single-gpu_examples_gpu run_pipelines_torch_gpu: name: PyTorch pipelines diff --git a/utils/notification_service.py b/utils/notification_service.py index 6316c5298dbc7e..4918b4a459ac38 100644 --- a/utils/notification_service.py +++ b/utils/notification_service.py @@ -448,7 +448,12 @@ def get_reply_blocks(self, job_name, job_result, failures, device, text): content = {"type": "section", "text": {"type": "mrkdwn", "text": text}} - if job_result["job_link"] is not None: + # TODO: Make sure we always have a valid job link (or at least a way not to break the report sending) + # Currently we get the device from a job's artifact name. + # If a device is found, the job name should contain the device type, for example, `XXX (single-gpu)`. + # This could be done by adding `machine_type` in a job's `strategy`. + # (If `job_result["job_link"][device]` is `None`, we get an error: `... [ERROR] must provide a string ...`) + if job_result["job_link"] is not None and job_result["job_link"][device] is not None: content["accessory"] = { "type": "button", "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True}, From 0bf1e1aca46176fca99b46798867fcd7e9d75791 Mon Sep 17 00:00:00 2001 From: Kian Sierra McGettigan <47116198+kiansierra@users.noreply.github.com> Date: Thu, 4 Aug 2022 19:22:19 +0200 Subject: [PATCH 016/539] Update no trainer examples for QA and Semantic Segmentation (#18474) * swag_no_trainer updated for with gather_metrics * Removed unused variable samples_seen * updated examples with gather_for_metrics --- .../run_qa_beam_search_no_trainer.py | 22 +++++++++---------- .../question-answering/run_qa_no_trainer.py | 10 ++++----- .../run_semantic_segmentation_no_trainer.py | 11 +--------- 3 files changed, 17 insertions(+), 26 deletions(-) diff --git a/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py b/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py index e6c66e379a9650..69ddf24ab5aa49 100644 --- a/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py +++ b/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py @@ -698,7 +698,7 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len): step = 0 # create a numpy array and fill it with -100. logits_concat = np.full((len(dataset), max_len), -100, dtype=np.float32) - # Now since we have create an array now we will populate it with the outputs gathered using accelerator.gather + # Now since we have create an array now we will populate it with the outputs gathered using accelerator.gather_for_metrics for i, output_logit in enumerate(start_or_end_logits): # populate columns # We have to fill it such that we have to take the whole tensor and replace it on the newly created array # And after every iteration we have to change the step @@ -876,11 +876,11 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len): end_top_index = accelerator.pad_across_processes(end_top_index, dim=1, pad_index=-100) cls_logits = accelerator.pad_across_processes(cls_logits, dim=1, pad_index=-100) - all_start_top_log_probs.append(accelerator.gather(start_top_log_probs).cpu().numpy()) - all_start_top_index.append(accelerator.gather(start_top_index).cpu().numpy()) - all_end_top_log_probs.append(accelerator.gather(end_top_log_probs).cpu().numpy()) - all_end_top_index.append(accelerator.gather(end_top_index).cpu().numpy()) - all_cls_logits.append(accelerator.gather(cls_logits).cpu().numpy()) + all_start_top_log_probs.append(accelerator.gather_for_metrics(start_top_log_probs).cpu().numpy()) + all_start_top_index.append(accelerator.gather_for_metrics(start_top_index).cpu().numpy()) + all_end_top_log_probs.append(accelerator.gather_for_metrics(end_top_log_probs).cpu().numpy()) + all_end_top_index.append(accelerator.gather_for_metrics(end_top_index).cpu().numpy()) + all_cls_logits.append(accelerator.gather_for_metrics(cls_logits).cpu().numpy()) max_len = max([x.shape[1] for x in all_end_top_log_probs]) # Get the max_length of the tensor @@ -936,11 +936,11 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len): end_top_index = accelerator.pad_across_processes(end_top_index, dim=1, pad_index=-100) cls_logits = accelerator.pad_across_processes(cls_logits, dim=1, pad_index=-100) - all_start_top_log_probs.append(accelerator.gather(start_top_log_probs).cpu().numpy()) - all_start_top_index.append(accelerator.gather(start_top_index).cpu().numpy()) - all_end_top_log_probs.append(accelerator.gather(end_top_log_probs).cpu().numpy()) - all_end_top_index.append(accelerator.gather(end_top_index).cpu().numpy()) - all_cls_logits.append(accelerator.gather(cls_logits).cpu().numpy()) + all_start_top_log_probs.append(accelerator.gather_for_metrics(start_top_log_probs).cpu().numpy()) + all_start_top_index.append(accelerator.gather_for_metrics(start_top_index).cpu().numpy()) + all_end_top_log_probs.append(accelerator.gather_for_metrics(end_top_log_probs).cpu().numpy()) + all_end_top_index.append(accelerator.gather_for_metrics(end_top_index).cpu().numpy()) + all_cls_logits.append(accelerator.gather_for_metrics(cls_logits).cpu().numpy()) max_len = max([x.shape[1] for x in all_end_top_log_probs]) # Get the max_length of the tensor diff --git a/examples/pytorch/question-answering/run_qa_no_trainer.py b/examples/pytorch/question-answering/run_qa_no_trainer.py index ec86d95b5e5962..d98dca22bf2e48 100755 --- a/examples/pytorch/question-answering/run_qa_no_trainer.py +++ b/examples/pytorch/question-answering/run_qa_no_trainer.py @@ -715,7 +715,7 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len): step = 0 # create a numpy array and fill it with -100. logits_concat = np.full((len(dataset), max_len), -100, dtype=np.float64) - # Now since we have create an array now we will populate it with the outputs gathered using accelerator.gather + # Now since we have create an array now we will populate it with the outputs gathered using accelerator.gather_for_metrics for i, output_logit in enumerate(start_or_end_logits): # populate columns # We have to fill it such that we have to take the whole tensor and replace it on the newly created array # And after every iteration we have to change the step @@ -901,8 +901,8 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len): start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) - all_start_logits.append(accelerator.gather(start_logits).cpu().numpy()) - all_end_logits.append(accelerator.gather(end_logits).cpu().numpy()) + all_start_logits.append(accelerator.gather_for_metrics(start_logits).cpu().numpy()) + all_end_logits.append(accelerator.gather_for_metrics(end_logits).cpu().numpy()) max_len = max([x.shape[1] for x in all_start_logits]) # Get the max_length of the tensor @@ -940,8 +940,8 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len): start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) - all_start_logits.append(accelerator.gather(start_logits).cpu().numpy()) - all_end_logits.append(accelerator.gather(end_logits).cpu().numpy()) + all_start_logits.append(accelerator.gather_for_metrics(start_logits).cpu().numpy()) + all_end_logits.append(accelerator.gather_for_metrics(end_logits).cpu().numpy()) max_len = max([x.shape[1] for x in all_start_logits]) # Get the max_length of the tensor # concatenate the numpy array diff --git a/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py b/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py index 237934b762d5b6..7ffb876d4db58f 100644 --- a/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py +++ b/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py @@ -605,7 +605,6 @@ def preprocess_val(example_batch): logger.info("***** Running evaluation *****") model.eval() - samples_seen = 0 for step, batch in enumerate(tqdm(eval_dataloader, disable=not accelerator.is_local_main_process)): with torch.no_grad(): outputs = model(**batch) @@ -615,15 +614,7 @@ def preprocess_val(example_batch): ) predictions = upsampled_logits.argmax(dim=1) - predictions, references = accelerator.gather((predictions, batch["labels"])) - - # If we are in a multiprocess environment, the last batch has duplicates - if accelerator.num_processes > 1: - if step == len(eval_dataloader) - 1: - predictions = predictions[: len(eval_dataloader.dataset) - samples_seen] - references = references[: len(eval_dataloader.dataset) - samples_seen] - else: - samples_seen += references.shape[0] + predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=predictions, From 14928921e2f6d5b049d8dcfa07982e9ca351a402 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Thu, 4 Aug 2022 20:41:15 +0200 Subject: [PATCH 017/539] Add `TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING` (#18469) Co-authored-by: ydshieh --- src/transformers/__init__.py | 2 ++ src/transformers/models/auto/__init__.py | 2 ++ .../data2vec/modeling_tf_data2vec_vision.py | 4 ++-- .../models/segformer/modeling_tf_segformer.py | 16 ++++++++-------- src/transformers/utils/dummy_tf_objects.py | 3 +++ .../segformer/test_modeling_tf_segformer.py | 4 ++++ tests/test_modeling_tf_common.py | 18 ++++++++++++++++-- 7 files changed, 37 insertions(+), 12 deletions(-) diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index e8cfd47f3d3b37..5e1e95c6291b78 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -2088,6 +2088,7 @@ "TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING", "TF_MODEL_FOR_PRETRAINING_MAPPING", "TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING", + "TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING", "TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING", "TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING", "TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING", @@ -4582,6 +4583,7 @@ TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, + TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, diff --git a/src/transformers/models/auto/__init__.py b/src/transformers/models/auto/__init__.py index b04c2420ef963e..139d4feda336e0 100644 --- a/src/transformers/models/auto/__init__.py +++ b/src/transformers/models/auto/__init__.py @@ -111,6 +111,7 @@ "TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING", "TF_MODEL_FOR_PRETRAINING_MAPPING", "TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING", + "TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING", "TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING", "TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING", "TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING", @@ -253,6 +254,7 @@ TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, + TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, diff --git a/src/transformers/models/data2vec/modeling_tf_data2vec_vision.py b/src/transformers/models/data2vec/modeling_tf_data2vec_vision.py index e09cbfb9c42a6e..33e9921cc9a58c 100644 --- a/src/transformers/models/data2vec/modeling_tf_data2vec_vision.py +++ b/src/transformers/models/data2vec/modeling_tf_data2vec_vision.py @@ -1352,8 +1352,8 @@ def masked_loss(real, pred): loss_ = loss_fct(real, pred) mask = tf.cast(mask, dtype=loss_.dtype) loss_ *= mask - - return tf.reduce_sum(loss_) / tf.reduce_sum(mask) + reduced_masked_loss = tf.reduce_sum(loss_) / tf.reduce_sum(mask) + return tf.reshape(reduced_masked_loss, (1,)) main_loss = masked_loss(labels, upsampled_logits) auxiliary_loss = masked_loss(labels, upsampled_auxiliary_logits) diff --git a/src/transformers/models/segformer/modeling_tf_segformer.py b/src/transformers/models/segformer/modeling_tf_segformer.py index 25350d1c82559a..c2f4b2ff0c7cd8 100644 --- a/src/transformers/models/segformer/modeling_tf_segformer.py +++ b/src/transformers/models/segformer/modeling_tf_segformer.py @@ -201,9 +201,9 @@ def __init__(self, config: SegformerConfig, hidden_size: int, **kwargs): self.dense = tf.keras.layers.Dense(hidden_size, name="dense") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) - def call(self, hidden_states: tf.Tensor) -> tf.Tensor: + def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_states = self.dense(hidden_states) - hidden_states = self.dropout(hidden_states) + hidden_states = self.dropout(hidden_states, training=training) return hidden_states @@ -276,13 +276,13 @@ def __init__( self.dense2 = tf.keras.layers.Dense(out_features, name="dense2") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) - def call(self, hidden_states: tf.Tensor, height: int, width: int) -> tf.Tensor: + def call(self, hidden_states: tf.Tensor, height: int, width: int, training: bool = False) -> tf.Tensor: hidden_states = self.dense1(hidden_states) hidden_states = self.depthwise_convolution(hidden_states, height, width) hidden_states = self.intermediate_act_fn(hidden_states) - hidden_states = self.dropout(hidden_states) + hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.dense2(hidden_states) - hidden_states = self.dropout(hidden_states) + hidden_states = self.dropout(hidden_states, training=training) return hidden_states @@ -749,7 +749,7 @@ def __init__(self, config: SegformerConfig, **kwargs): self.config = config - def call(self, encoder_hidden_states): + def call(self, encoder_hidden_states, training: bool = False): batch_size = shape_list(encoder_hidden_states[-1])[0] all_hidden_states = () @@ -773,9 +773,9 @@ def call(self, encoder_hidden_states): all_hidden_states += (encoder_hidden_state,) hidden_states = self.linear_fuse(tf.concat(all_hidden_states[::-1], axis=-1)) - hidden_states = self.batch_norm(hidden_states) + hidden_states = self.batch_norm(hidden_states, training=training) hidden_states = self.activation(hidden_states) - hidden_states = self.dropout(hidden_states) + hidden_states = self.dropout(hidden_states, training=training) # logits of shape (batch_size, height/4, width/4, num_labels) logits = self.classifier(hidden_states) diff --git a/src/transformers/utils/dummy_tf_objects.py b/src/transformers/utils/dummy_tf_objects.py index 37b58cd8146601..6df601ca646af3 100644 --- a/src/transformers/utils/dummy_tf_objects.py +++ b/src/transformers/utils/dummy_tf_objects.py @@ -279,6 +279,9 @@ def __init__(self, *args, **kwargs): TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING = None +TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = None + + TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = None diff --git a/tests/models/segformer/test_modeling_tf_segformer.py b/tests/models/segformer/test_modeling_tf_segformer.py index 6cc2c77fe935fc..d6a73e22192c3b 100644 --- a/tests/models/segformer/test_modeling_tf_segformer.py +++ b/tests/models/segformer/test_modeling_tf_segformer.py @@ -27,6 +27,7 @@ if is_tf_available(): + import numpy as np import tensorflow as tf from transformers import TFSegformerForImageClassification, TFSegformerForSemanticSegmentation, TFSegformerModel @@ -336,6 +337,9 @@ def recursive_check(tuple_object, dict_object): def test_dataset_conversion(self): super().test_dataset_conversion() + def check_keras_fit_results(self, val_loss1, val_loss2, atol=2e-1, rtol=2e-1): + self.assertTrue(np.allclose(val_loss1, val_loss2, atol=atol, rtol=rtol)) + @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, reason="TF (<=2.8) does not support backprop for grouped convolutions on CPU.", diff --git a/tests/test_modeling_tf_common.py b/tests/test_modeling_tf_common.py index d63b1b32733e89..15855e6a1f40e6 100644 --- a/tests/test_modeling_tf_common.py +++ b/tests/test_modeling_tf_common.py @@ -62,11 +62,13 @@ from transformers import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, + TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, + TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, @@ -170,6 +172,15 @@ def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> d inputs_dict["labels"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32 ) + elif model_class in get_values(TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING): + num_patches = self.model_tester.image_size // self.model_tester.patch_size + inputs_dict["bool_masked_pos"] = tf.zeros( + (self.model_tester.batch_size, num_patches**2), dtype=tf.int32 + ) + elif model_class in get_values(TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING): + batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape + inputs_dict["labels"] = tf.zeros((self.model_tester.batch_size, height, width), dtype=tf.int32) + return inputs_dict def test_initialization(self): @@ -1389,6 +1400,9 @@ def test_loss_computation(self): self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) + def check_keras_fit_results(self, val_loss1, val_loss2, atol=1e-2, rtol=1e-3): + self.assertTrue(np.allclose(val_loss1, val_loss2, atol=atol, rtol=rtol)) + def test_keras_fit(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: @@ -1468,7 +1482,7 @@ def test_keras_fit(self): val_loss2 = history2.history["val_loss"][0] self.assertTrue(not isnan(val_loss2)) accuracy2 = {key: val[0] for key, val in history2.history.items() if key.endswith("accuracy")} - self.assertTrue(np.allclose(val_loss1, val_loss2, atol=1e-2, rtol=1e-3)) + self.check_keras_fit_results(val_loss1, val_loss2) self.assertEqual(history1.history.keys(), history2.history.keys()) for key in history1.history.keys(): if not key.startswith("val_"): @@ -1494,7 +1508,7 @@ def test_keras_fit(self): val_loss3 = history3.history["val_loss"][0] self.assertTrue(not isnan(val_loss3)) accuracy3 = {key: val[0] for key, val in history3.history.items() if key.endswith("accuracy")} - self.assertTrue(np.allclose(val_loss1, val_loss3, atol=1e-2, rtol=1e-3)) + self.check_keras_fit_results(val_loss1, val_loss3) self.assertEqual(history1.history.keys(), history3.history.keys()) if metrics: self.assertTrue(len(accuracy1) == len(accuracy3) > 0, "Missing metrics!") From 586dcf6b2146ae9e2e8961b49c5e6706ae14f724 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Fri, 5 Aug 2022 08:45:07 +0200 Subject: [PATCH 018/539] Fixing issue where generic model types wouldn't load properly with the pipeline (#18392) * Adding a better error message when the model is improperly configured within transformers. * Update src/transformers/pipelines/__init__.py * Black version. * Overriding task aliases so that tokenizer+feature_extractor values are correct. * Fixing task aliases by overriding their names early * X. * Fixing feature-extraction. * black again. * Normalizing `translation` too. * Fixing last few corner cases. translation need to use its non normalized name (translation_XX_to_YY, so that the task_specific_params are correctly overloaded). This can be removed and cleaned up in a later PR. `speech-encode-decoder` actually REQUIRES to pass a `tokenizer` manually so the error needs to be discarded when the `tokenizer` is already there. * doc-builder fix. * Fixing the real issue. * Removing dead code. * Do not import the actual config classes. --- src/transformers/pipelines/__init__.py | 36 +++++++++++++++++-- src/transformers/pipelines/base.py | 9 +++-- ..._pipelines_automatic_speech_recognition.py | 9 +---- 3 files changed, 38 insertions(+), 16 deletions(-) diff --git a/src/transformers/pipelines/__init__.py b/src/transformers/pipelines/__init__.py index 23bc541473a63b..7a022e5635e674 100755 --- a/src/transformers/pipelines/__init__.py +++ b/src/transformers/pipelines/__init__.py @@ -311,6 +311,11 @@ NO_FEATURE_EXTRACTOR_TASKS = set() NO_TOKENIZER_TASKS = set() +# Those model configs are special, they are generic over their task, meaning +# any tokenizer/feature_extractor might be use for a given model so we cannot +# use the statically defined TOKENIZER_MAPPING and FEATURE_EXTRACTOR_MAPPING to +# see if the model defines such objects or not. +MULTI_MODEL_CONFIGS = {"VisionTextDualEncoderConfig", "SpeechEncoderDecoderConfig"} for task, values in SUPPORTED_TASKS.items(): if values["type"] == "text": NO_FEATURE_EXTRACTOR_TASKS.add(task) @@ -380,8 +385,9 @@ def check_task(task: str) -> Tuple[Dict, Any]: - `"zero-shot-image-classification"` Returns: - (task_defaults`dict`, task_options: (`tuple`, None)) The actual dictionary required to initialize the pipeline - and some extra task options for parametrized tasks like "translation_XX_to_YY" + (normalized_task: `str`, task_defaults: `dict`, task_options: (`tuple`, None)) The normalized task name + (removed alias and options). The actual dictionary required to initialize the pipeline and some extra task + options for parametrized tasks like "translation_XX_to_YY" """ @@ -614,7 +620,7 @@ def pipeline( model, module_file + ".py", class_name, revision=revision, use_auth_token=use_auth_token ) else: - targeted_task, task_options = check_task(task) + normalized_task, targeted_task, task_options = check_task(task) if pipeline_class is None: pipeline_class = targeted_task["impl"] @@ -667,12 +673,36 @@ def pipeline( load_tokenizer = type(model_config) in TOKENIZER_MAPPING or model_config.tokenizer_class is not None load_feature_extractor = type(model_config) in FEATURE_EXTRACTOR_MAPPING or feature_extractor is not None + if ( + tokenizer is None + and not load_tokenizer + and normalized_task not in NO_TOKENIZER_TASKS + # Using class name to avoid importing the real class. + and model_config.__class__.__name__ in MULTI_MODEL_CONFIGS + ): + # This is a special category of models, that are fusions of multiple models + # so the model_config might not define a tokenizer, but it seems to be + # necessary for the task, so we're force-trying to load it. + load_tokenizer = True + if ( + feature_extractor is None + and not load_feature_extractor + and normalized_task not in NO_FEATURE_EXTRACTOR_TASKS + # Using class name to avoid importing the real class. + and model_config.__class__.__name__ in MULTI_MODEL_CONFIGS + ): + # This is a special category of models, that are fusions of multiple models + # so the model_config might not define a tokenizer, but it seems to be + # necessary for the task, so we're force-trying to load it. + load_feature_extractor = True + if task in NO_TOKENIZER_TASKS: # These will never require a tokenizer. # the model on the other hand might have a tokenizer, but # the files could be missing from the hub, instead of failing # on such repos, we just force to not load it. load_tokenizer = False + if task in NO_FEATURE_EXTRACTOR_TASKS: load_feature_extractor = False diff --git a/src/transformers/pipelines/base.py b/src/transformers/pipelines/base.py index a3e11eb60060df..6e2c28e5ddf84d 100644 --- a/src/transformers/pipelines/base.py +++ b/src/transformers/pipelines/base.py @@ -630,7 +630,6 @@ def __iter__(self): for line in sys.stdin: # Split for multi-columns if "\t" in line: - line = line.split("\t") if self.column: # Dictionary to map arguments @@ -752,7 +751,6 @@ def __init__( binary_output: bool = False, **kwargs, ): - if framework is None: framework, model = infer_framework_load_model(model, config=model.config) @@ -1123,18 +1121,19 @@ def get_supported_tasks(self) -> List[str]: supported_task.sort() return supported_task - def check_task(self, task: str) -> Tuple[Dict, Any]: + def check_task(self, task: str) -> Tuple[str, Dict, Any]: if task in self.task_aliases: task = self.task_aliases[task] if task in self.supported_tasks: targeted_task = self.supported_tasks[task] - return targeted_task, None + return task, targeted_task, None if task.startswith("translation"): tokens = task.split("_") if len(tokens) == 4 and tokens[0] == "translation" and tokens[2] == "to": targeted_task = self.supported_tasks["translation"] - return targeted_task, (tokens[1], tokens[3]) + task = "translation" + return task, targeted_task, (tokens[1], tokens[3]) raise KeyError(f"Invalid translation task {task}, use 'translation_XX_to_YY' format") raise KeyError( diff --git a/tests/pipelines/test_pipelines_automatic_speech_recognition.py b/tests/pipelines/test_pipelines_automatic_speech_recognition.py index 25bf520eafb4d7..0523639cc4fe85 100644 --- a/tests/pipelines/test_pipelines_automatic_speech_recognition.py +++ b/tests/pipelines/test_pipelines_automatic_speech_recognition.py @@ -141,15 +141,8 @@ def test_small_model_pt(self): @require_torch def test_small_model_pt_seq2seq(self): - model_id = "hf-internal-testing/tiny-random-speech-encoder-decoder" - tokenizer = AutoTokenizer.from_pretrained(model_id) - feature_extractor = AutoFeatureExtractor.from_pretrained(model_id) - speech_recognizer = pipeline( - task="automatic-speech-recognition", - model=model_id, - tokenizer=tokenizer, - feature_extractor=feature_extractor, + model="hf-internal-testing/tiny-random-speech-encoder-decoder", framework="pt", ) From 575aa6ef1ac2e8812807f8a5f7c6edcd6b3cf012 Mon Sep 17 00:00:00 2001 From: Seunghwan Hong Date: Fri, 5 Aug 2022 20:39:40 +0900 Subject: [PATCH 019/539] Fix TFSwinSelfAttention to have relative position index as non-trainable weight (#18226) Signed-off-by: Seunghwan Hong --- .../models/swin/modeling_tf_swin.py | 37 +++++++++++-------- 1 file changed, 22 insertions(+), 15 deletions(-) diff --git a/src/transformers/models/swin/modeling_tf_swin.py b/src/transformers/models/swin/modeling_tf_swin.py index c781710bdd0ff5..dc0e7131628b89 100644 --- a/src/transformers/models/swin/modeling_tf_swin.py +++ b/src/transformers/models/swin/modeling_tf_swin.py @@ -461,21 +461,6 @@ def __init__(self, config: SwinConfig, dim: int, num_heads: int, **kwargs) -> No window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size) ) - # get pair-wise relative position index for each token inside the window - coords_h = tf.range(self.window_size[0]) - coords_w = tf.range(self.window_size[1]) - coords = tf.stack(tf.meshgrid(coords_h, coords_w, indexing="ij")) - coords_flatten = tf.reshape(coords, (shape_list(coords)[0], -1)) - relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] - relative_coords = tf.transpose(relative_coords, (1, 2, 0)) - - stack_0, stack_1 = tf.unstack(relative_coords, axis=2) - stack_0 += self.window_size[0] - 1 - stack_0 *= 2 * self.window_size[1] - 1 - stack_1 += self.window_size[1] - 1 - relative_coords = tf.stack([stack_0, stack_1], axis=2) - self.relative_position_index = tf.reduce_sum(relative_coords, axis=-1) - self.query = tf.keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), @@ -503,6 +488,28 @@ def build(self, input_shape: tf.TensorShape) -> None: initializer="zeros", name="relative_position_bias_table", ) + self.relative_position_index = self.add_weight( + shape=(self.window_size[0] ** 2, self.window_size[1] ** 2), + trainable=False, + dtype=tf.int32, + name="relative_position_index", + ) + + # get pair-wise relative position index for each token inside the window + coords_h = tf.range(self.window_size[0]) + coords_w = tf.range(self.window_size[1]) + coords = tf.stack(tf.meshgrid(coords_h, coords_w, indexing="ij")) + coords_flatten = tf.reshape(coords, (shape_list(coords)[0], -1)) + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] + relative_coords = tf.transpose(relative_coords, (1, 2, 0)) + + stack_0, stack_1 = tf.unstack(relative_coords, axis=2) + stack_0 += self.window_size[0] - 1 + stack_0 *= 2 * self.window_size[1] - 1 + stack_1 += self.window_size[1] - 1 + relative_coords = tf.stack([stack_0, stack_1], axis=2) + + self.relative_position_index.assign(tf.cast(tf.reduce_sum(relative_coords, axis=-1), tf.int32)) super().build(input_shape) def transpose_for_scores(self, x: tf.Tensor) -> tf.Tensor: From bf174f916bf8f470f054259df8a62bf15578b8c5 Mon Sep 17 00:00:00 2001 From: Seunghwan Hong Date: Fri, 5 Aug 2022 20:40:14 +0900 Subject: [PATCH 020/539] Refactor `TFSwinLayer` to increase serving compatibility (#18352) * Refactor `TFSwinLayer` to increase serving compatibility Signed-off-by: Seunghwan Hong * Fix missed parameters while refactoring Signed-off-by: Seunghwan Hong * Fix window_reverse to calculate batch size Signed-off-by: Seunghwan Hong Co-Authored-By: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- .../models/swin/modeling_tf_swin.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/src/transformers/models/swin/modeling_tf_swin.py b/src/transformers/models/swin/modeling_tf_swin.py index dc0e7131628b89..2f9bd27b0e0006 100644 --- a/src/transformers/models/swin/modeling_tf_swin.py +++ b/src/transformers/models/swin/modeling_tf_swin.py @@ -226,9 +226,9 @@ def window_reverse(windows: tf.Tensor, window_size: int, height: int, width: int """ Merges windows to produce higher resolution features. """ - x = shape_list(windows)[0] + x = tf.shape(windows)[0] y = tf.cast(height * width / (window_size * window_size), tf.int32) - batch_size = int(x / y) + batch_size = tf.math.floordiv(x, y) windows = tf.reshape( windows, (batch_size, height // window_size, width // window_size, window_size, window_size, -1) ) @@ -695,16 +695,18 @@ def get_attn_mask(self, height: int, width: int, window_size: int, shift_size: i img_mask = tf.expand_dims(img_mask, -1) img_mask = tf.expand_dims(img_mask, 0) - mask_windows = window_partition(img_mask, self.window_size) - mask_windows = tf.reshape(mask_windows, (-1, self.window_size * self.window_size)) + mask_windows = window_partition(img_mask, window_size) + mask_windows = tf.reshape(mask_windows, (-1, window_size * window_size)) attn_mask = tf.expand_dims(mask_windows, 1) - tf.expand_dims(mask_windows, 2) attn_mask = tf.where(attn_mask != 0, float(-100.0), attn_mask) attn_mask = tf.where(attn_mask == 0, float(0.0), attn_mask) return attn_mask - def maybe_pad(self, hidden_states: tf.Tensor, height: int, width: int) -> Tuple[tf.Tensor, tf.Tensor]: - pad_right = (self.window_size - width % self.window_size) % self.window_size - pad_bottom = (self.window_size - height % self.window_size) % self.window_size + def maybe_pad( + self, hidden_states: tf.Tensor, window_size: int, height: int, width: int + ) -> Tuple[tf.Tensor, tf.Tensor]: + pad_right = (window_size - width % window_size) % window_size + pad_bottom = (window_size - height % window_size) % window_size pad_values = [[0, 0], [0, pad_bottom], [0, pad_right], [0, 0]] hidden_states = tf.pad(hidden_states, pad_values) pad_values = tf.reshape(pad_values, (-1,)) @@ -730,7 +732,7 @@ def call( hidden_states = self.layernorm_before(hidden_states, training=training) hidden_states = tf.reshape(hidden_states, (batch_size, height, width, channels)) # pad hidden_states to multiples of window size - hidden_states, pad_values = self.maybe_pad(hidden_states, height, width) + hidden_states, pad_values = self.maybe_pad(hidden_states, window_size, height, width) _, height_pad, width_pad, _ = shape_list(hidden_states) # cyclic shift From 893122f6662acb339c7e1a014834b2670ae00e0b Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Fri, 5 Aug 2022 13:59:55 +0200 Subject: [PATCH 021/539] Add TF prefix to TF-Res test class (#18481) Co-authored-by: ydshieh --- tests/models/resnet/test_modeling_tf_resnet.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/models/resnet/test_modeling_tf_resnet.py b/tests/models/resnet/test_modeling_tf_resnet.py index 5f4eead8661cbf..1056ebc8eeac4f 100644 --- a/tests/models/resnet/test_modeling_tf_resnet.py +++ b/tests/models/resnet/test_modeling_tf_resnet.py @@ -41,7 +41,7 @@ from transformers import AutoFeatureExtractor -class ResNetModelTester: +class TFResNetModelTester: def __init__( self, parent, @@ -116,7 +116,7 @@ def prepare_config_and_inputs_for_common(self): @require_tf -class ResNetModelTest(TFModelTesterMixin, unittest.TestCase): +class TFResNetModelTest(TFModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as ResNet does not use input_ids, inputs_embeds, attention_mask and seq_length. @@ -131,7 +131,7 @@ class ResNetModelTest(TFModelTesterMixin, unittest.TestCase): has_attentions = False def setUp(self): - self.model_tester = ResNetModelTester(self) + self.model_tester = TFResNetModelTester(self) self.config_tester = ConfigTester(self, config_class=ResNetConfig, has_text_modality=False) def test_config(self): @@ -223,7 +223,7 @@ def prepare_img(): @require_tf @require_vision -class ResNetModelIntegrationTest(unittest.TestCase): +class TFResNetModelIntegrationTest(unittest.TestCase): @cached_property def default_feature_extractor(self): return ( From c7849d9efcf826021da0dd5721f380f377898980 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Fri, 5 Aug 2022 09:12:19 -0400 Subject: [PATCH 022/539] Remove py.typed (#18485) --- setup.py | 1 - src/transformers/py.typed | 1 - 2 files changed, 2 deletions(-) delete mode 100644 src/transformers/py.typed diff --git a/setup.py b/setup.py index 9c37822ac98fdb..52b7837a88456d 100644 --- a/setup.py +++ b/setup.py @@ -412,7 +412,6 @@ def run(self): url="https://github.com/huggingface/transformers", package_dir={"": "src"}, packages=find_packages("src"), - package_data={"transformers": ["py.typed"]}, zip_safe=False, extras_require=extras, entry_points={"console_scripts": ["transformers-cli=transformers.commands.transformers_cli:main"]}, diff --git a/src/transformers/py.typed b/src/transformers/py.typed deleted file mode 100644 index 8b137891791fe9..00000000000000 --- a/src/transformers/py.typed +++ /dev/null @@ -1 +0,0 @@ - From 70fa1a8d2652fea2e079bb253fa62d478d00647c Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Fri, 5 Aug 2022 09:14:51 -0400 Subject: [PATCH 023/539] Fix pipeline tests (#18487) * Fix pipeline tests * Make sure all pipelines tests run with init changes --- src/transformers/pipelines/__init__.py | 1 + tests/pipelines/test_pipelines_common.py | 4 ++-- utils/tests_fetcher.py | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/transformers/pipelines/__init__.py b/src/transformers/pipelines/__init__.py index 7a022e5635e674..d2a4b663801d78 100755 --- a/src/transformers/pipelines/__init__.py +++ b/src/transformers/pipelines/__init__.py @@ -606,6 +606,7 @@ def pipeline( # Retrieve the task if task in custom_tasks: + normalized_task = task targeted_task, task_options = clean_custom_task(custom_tasks[task]) if pipeline_class is None: if not trust_remote_code: diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py index 83474a5ba048e2..5d5c8fa2333eb6 100644 --- a/tests/pipelines/test_pipelines_common.py +++ b/tests/pipelines/test_pipelines_common.py @@ -795,7 +795,7 @@ def test_warning_logs(self): alias = "text-classification" # Get the original task, so we can restore it at the end. # (otherwise the subsequential tests in `TextClassificationPipelineTests` will fail) - original_task, original_task_options = PIPELINE_REGISTRY.check_task(alias) + _, original_task, _ = PIPELINE_REGISTRY.check_task(alias) try: with CaptureLogger(logger_) as cm: @@ -816,7 +816,7 @@ def test_register_pipeline(self): ) assert "custom-text-classification" in PIPELINE_REGISTRY.get_supported_tasks() - task_def, _ = PIPELINE_REGISTRY.check_task("custom-text-classification") + _, task_def, _ = PIPELINE_REGISTRY.check_task("custom-text-classification") self.assertEqual(task_def["pt"], (AutoModelForSequenceClassification,) if is_torch_available() else ()) self.assertEqual(task_def["tf"], (TFAutoModelForSequenceClassification,) if is_tf_available() else ()) self.assertEqual(task_def["type"], "text") diff --git a/utils/tests_fetcher.py b/utils/tests_fetcher.py index 9f18bb83c7ee7f..329d248de3c089 100644 --- a/utils/tests_fetcher.py +++ b/utils/tests_fetcher.py @@ -377,6 +377,7 @@ def create_reverse_dependency_map(): ], "optimization.py": "optimization/test_optimization.py", "optimization_tf.py": "optimization/test_optimization_tf.py", + "pipelines/__init__.py": "pipelines/test_pipelines_*.py", "pipelines/base.py": "pipelines/test_pipelines_*.py", "pipelines/text2text_generation.py": [ "pipelines/test_pipelines_text2text_generation.py", From 5cd40323684c183c30b34758aea1e877996a7ac9 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Fri, 5 Aug 2022 10:12:40 -0400 Subject: [PATCH 024/539] Use new huggingface_hub tools for download models (#18438) * Draft new cached_file * Initial draft for config and model * Small fixes * Fix first batch of tests * Look in cache when internet is down * Fix last tests * Bad black, not fixing all quality errors * Make diff less * Implement change for TF and Flax models * Add tokenizer and feature extractor * For compatibility with main * Add utils to move the cache and auto-do it at first use. * Quality * Deal with empty commit shas * Deal with empty etag * Address review comments --- src/transformers/configuration_utils.py | 118 ++--- src/transformers/feature_extraction_utils.py | 103 ++--- src/transformers/modeling_flax_utils.py | 139 +++--- src/transformers/modeling_tf_utils.py | 139 +++--- src/transformers/modeling_utils.py | 136 ++---- src/transformers/tokenization_utils_base.py | 94 +--- src/transformers/utils/__init__.py | 2 + src/transformers/utils/hub.py | 456 +++++++++++++++++-- tests/test_configuration_common.py | 4 +- tests/test_feature_extraction_common.py | 4 +- tests/test_modeling_common.py | 4 +- tests/test_modeling_tf_common.py | 4 +- tests/test_tokenization_common.py | 4 +- 13 files changed, 662 insertions(+), 545 deletions(-) diff --git a/src/transformers/configuration_utils.py b/src/transformers/configuration_utils.py index fe2d6b3aaef637..b10475127b4fce 100755 --- a/src/transformers/configuration_utils.py +++ b/src/transformers/configuration_utils.py @@ -25,25 +25,9 @@ from packaging import version -from requests import HTTPError - from . import __version__ from .dynamic_module_utils import custom_object_save -from .utils import ( - CONFIG_NAME, - HUGGINGFACE_CO_RESOLVE_ENDPOINT, - EntryNotFoundError, - PushToHubMixin, - RepositoryNotFoundError, - RevisionNotFoundError, - cached_path, - copy_func, - hf_bucket_url, - is_offline_mode, - is_remote_url, - is_torch_available, - logging, -) +from .utils import CONFIG_NAME, PushToHubMixin, cached_file, copy_func, is_torch_available, logging logger = logging.get_logger(__name__) @@ -591,77 +575,43 @@ def _get_config_dict( if from_pipeline is not None: user_agent["using_pipeline"] = from_pipeline - if is_offline_mode() and not local_files_only: - logger.info("Offline mode: forcing local_files_only=True") - local_files_only = True - pretrained_model_name_or_path = str(pretrained_model_name_or_path) - if os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)) or is_remote_url( - pretrained_model_name_or_path - ): - config_file = pretrained_model_name_or_path + + is_local = os.path.isdir(pretrained_model_name_or_path) + if os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)): + # Soecial case when pretrained_model_name_or_path is a local file + resolved_config_file = pretrained_model_name_or_path + is_local = True else: configuration_file = kwargs.pop("_configuration_file", CONFIG_NAME) - if os.path.isdir(os.path.join(pretrained_model_name_or_path, subfolder)): - config_file = os.path.join(pretrained_model_name_or_path, subfolder, configuration_file) - else: - config_file = hf_bucket_url( + try: + # Load from local folder or from cache or download from model Hub and cache + resolved_config_file = cached_file( pretrained_model_name_or_path, - filename=configuration_file, + configuration_file, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + local_files_only=local_files_only, + use_auth_token=use_auth_token, + user_agent=user_agent, revision=revision, - subfolder=subfolder if len(subfolder) > 0 else None, - mirror=None, + subfolder=subfolder, + ) + except EnvironmentError: + # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted to + # the original exception. + raise + except Exception: + # For any other exception, we throw a generic error. + raise EnvironmentError( + f"Can't load the configuration of '{pretrained_model_name_or_path}'. If you were trying to load it" + " from 'https://huggingface.co/models', make sure you don't have a local directory with the same" + f" name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory" + f" containing a {configuration_file} file" ) - - try: - # Load from URL or cache if already cached - resolved_config_file = cached_path( - config_file, - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - local_files_only=local_files_only, - use_auth_token=use_auth_token, - user_agent=user_agent, - ) - - except RepositoryNotFoundError: - raise EnvironmentError( - f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier listed on " - "'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having " - "permission to this repo with `use_auth_token` or log in with `huggingface-cli login` and pass " - "`use_auth_token=True`." - ) - except RevisionNotFoundError: - raise EnvironmentError( - f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for this " - f"model name. Check the model page at 'https://huggingface.co/{pretrained_model_name_or_path}' for " - "available revisions." - ) - except EntryNotFoundError: - raise EnvironmentError( - f"{pretrained_model_name_or_path} does not appear to have a file named {configuration_file}." - ) - except HTTPError as err: - raise EnvironmentError( - f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" - ) - except ValueError: - raise EnvironmentError( - f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it in" - f" the cached files and it looks like {pretrained_model_name_or_path} is not the path to a directory" - f" containing a {configuration_file} file.\nCheckout your internet connection or see how to run the" - " library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'." - ) - except EnvironmentError: - raise EnvironmentError( - f"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from " - "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " - f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " - f"containing a {configuration_file} file" - ) try: # Load config dict @@ -671,10 +621,10 @@ def _get_config_dict( f"It looks like the config file at '{resolved_config_file}' is not a valid JSON file." ) - if resolved_config_file == config_file: - logger.info(f"loading configuration file {config_file}") + if is_local: + logger.info(f"loading configuration file {resolved_config_file}") else: - logger.info(f"loading configuration file {config_file} from cache at {resolved_config_file}") + logger.info(f"loading configuration file {configuration_file} from cache at {resolved_config_file}") return config_dict, kwargs diff --git a/src/transformers/feature_extraction_utils.py b/src/transformers/feature_extraction_utils.py index b411d744284665..ec68f355191c1d 100644 --- a/src/transformers/feature_extraction_utils.py +++ b/src/transformers/feature_extraction_utils.py @@ -24,23 +24,15 @@ import numpy as np -from requests import HTTPError - from .dynamic_module_utils import custom_object_save from .utils import ( FEATURE_EXTRACTOR_NAME, - HUGGINGFACE_CO_RESOLVE_ENDPOINT, - EntryNotFoundError, PushToHubMixin, - RepositoryNotFoundError, - RevisionNotFoundError, TensorType, - cached_path, + cached_file, copy_func, - hf_bucket_url, is_flax_available, is_offline_mode, - is_remote_url, is_tf_available, is_torch_available, logging, @@ -388,64 +380,40 @@ def get_feature_extractor_dict( local_files_only = True pretrained_model_name_or_path = str(pretrained_model_name_or_path) + is_local = os.path.isdir(pretrained_model_name_or_path) if os.path.isdir(pretrained_model_name_or_path): feature_extractor_file = os.path.join(pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME) - elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): - feature_extractor_file = pretrained_model_name_or_path + if os.path.isfile(pretrained_model_name_or_path): + resolved_feature_extractor_file = pretrained_model_name_or_path + is_local = True else: - feature_extractor_file = hf_bucket_url( - pretrained_model_name_or_path, filename=FEATURE_EXTRACTOR_NAME, revision=revision, mirror=None - ) - - try: - # Load from URL or cache if already cached - resolved_feature_extractor_file = cached_path( - feature_extractor_file, - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - local_files_only=local_files_only, - use_auth_token=use_auth_token, - user_agent=user_agent, - ) - - except RepositoryNotFoundError: - raise EnvironmentError( - f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier listed on " - "'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a token having " - "permission to this repo with `use_auth_token` or log in with `huggingface-cli login` and pass " - "`use_auth_token=True`." - ) - except RevisionNotFoundError: - raise EnvironmentError( - f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for this " - f"model name. Check the model page at 'https://huggingface.co/{pretrained_model_name_or_path}' for " - "available revisions." - ) - except EntryNotFoundError: - raise EnvironmentError( - f"{pretrained_model_name_or_path} does not appear to have a file named {FEATURE_EXTRACTOR_NAME}." - ) - except HTTPError as err: - raise EnvironmentError( - f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}" - ) - except ValueError: - raise EnvironmentError( - f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it in" - f" the cached files and it looks like {pretrained_model_name_or_path} is not the path to a directory" - f" containing a {FEATURE_EXTRACTOR_NAME} file.\nCheckout your internet connection or see how to run" - " the library in offline mode at" - " 'https://huggingface.co/docs/transformers/installation#offline-mode'." - ) - except EnvironmentError: - raise EnvironmentError( - f"Can't load feature extractor for '{pretrained_model_name_or_path}'. If you were trying to load it " - "from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. " - f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " - f"containing a {FEATURE_EXTRACTOR_NAME} file" - ) + feature_extractor_file = FEATURE_EXTRACTOR_NAME + try: + # Load from local folder or from cache or download from model Hub and cache + resolved_feature_extractor_file = cached_file( + pretrained_model_name_or_path, + feature_extractor_file, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + local_files_only=local_files_only, + use_auth_token=use_auth_token, + user_agent=user_agent, + revision=revision, + ) + except EnvironmentError: + # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted to + # the original exception. + raise + except Exception: + # For any other exception, we throw a generic error. + raise EnvironmentError( + f"Can't load feature extractor for '{pretrained_model_name_or_path}'. If you were trying to load" + " it from 'https://huggingface.co/models', make sure you don't have a local directory with the" + f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a" + f" directory containing a {FEATURE_EXTRACTOR_NAME} file" + ) try: # Load feature_extractor dict @@ -458,12 +426,11 @@ def get_feature_extractor_dict( f"It looks like the config file at '{resolved_feature_extractor_file}' is not a valid JSON file." ) - if resolved_feature_extractor_file == feature_extractor_file: - logger.info(f"loading feature extractor configuration file {feature_extractor_file}") + if is_local: + logger.info(f"loading configuration file {resolved_feature_extractor_file}") else: logger.info( - f"loading feature extractor configuration file {feature_extractor_file} from cache at" - f" {resolved_feature_extractor_file}" + f"loading configuration file {feature_extractor_file} from cache at {resolved_feature_extractor_file}" ) return feature_extractor_dict, kwargs diff --git a/src/transformers/modeling_flax_utils.py b/src/transformers/modeling_flax_utils.py index 0dcb3bc959e83d..af75b418cad23e 100644 --- a/src/transformers/modeling_flax_utils.py +++ b/src/transformers/modeling_flax_utils.py @@ -32,7 +32,6 @@ from flax.serialization import from_bytes, to_bytes from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey -from requests import HTTPError from .configuration_utils import PretrainedConfig from .dynamic_module_utils import custom_object_save @@ -41,20 +40,14 @@ from .utils import ( FLAX_WEIGHTS_INDEX_NAME, FLAX_WEIGHTS_NAME, - HUGGINGFACE_CO_RESOLVE_ENDPOINT, WEIGHTS_NAME, - EntryNotFoundError, PushToHubMixin, - RepositoryNotFoundError, - RevisionNotFoundError, add_code_sample_docstrings, add_start_docstrings_to_model_forward, - cached_path, + cached_file, copy_func, has_file, - hf_bucket_url, is_offline_mode, - is_remote_url, logging, replace_return_docstrings, ) @@ -557,6 +550,9 @@ def from_pretrained( The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. + subfolder (`str`, *optional*, defaults to `""`): + In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can + specify the folder name here. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., `output_attentions=True`). Behaves differently depending on whether a `config` is provided or @@ -598,6 +594,7 @@ def from_pretrained( from_pipeline = kwargs.pop("_from_pipeline", None) from_auto_class = kwargs.pop("_from_auto", False) _do_init = kwargs.pop("_do_init", True) + subfolder = kwargs.pop("subfolder", "") if trust_remote_code is True: logger.warning( @@ -642,6 +639,8 @@ def from_pretrained( # Load model if pretrained_model_name_or_path is not None: + pretrained_model_name_or_path = str(pretrained_model_name_or_path) + is_local = os.path.isdir(pretrained_model_name_or_path) if os.path.isdir(pretrained_model_name_or_path): if from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)): # Load from a PyTorch checkpoint @@ -665,65 +664,44 @@ def from_pretrained( f"Error no file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory " f"{pretrained_model_name_or_path}." ) - elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): + elif os.path.isfile(pretrained_model_name_or_path): archive_file = pretrained_model_name_or_path + is_local = True else: filename = WEIGHTS_NAME if from_pt else FLAX_WEIGHTS_NAME - archive_file = hf_bucket_url( - pretrained_model_name_or_path, - filename=filename, - revision=revision, - ) - - # redirect to the cache, if necessary + try: + # Load from URL or cache if already cached + cached_file_kwargs = dict( + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + local_files_only=local_files_only, + use_auth_token=use_auth_token, + user_agent=user_agent, + revision=revision, + subfolder=subfolder, + _raise_exceptions_for_missing_entries=False, + ) + resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs) - try: - resolved_archive_file = cached_path( - archive_file, - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - local_files_only=local_files_only, - use_auth_token=use_auth_token, - user_agent=user_agent, - ) - - except RepositoryNotFoundError: - raise EnvironmentError( - f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier " - "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " - "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " - "login` and pass `use_auth_token=True`." - ) - except RevisionNotFoundError: - raise EnvironmentError( - f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for " - "this model name. Check the model page at " - f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." - ) - except EntryNotFoundError: - if filename == FLAX_WEIGHTS_NAME: - try: + # Since we set _raise_exceptions_for_missing_entries=False, we don't get an expection but a None + # result when internet is up, the repo and revision exist, but the file does not. + if resolved_archive_file is None and filename == FLAX_WEIGHTS_NAME: # Maybe the checkpoint is sharded, we try to grab the index name in this case. - archive_file = hf_bucket_url( - pretrained_model_name_or_path, - filename=FLAX_WEIGHTS_INDEX_NAME, - revision=revision, - ) - resolved_archive_file = cached_path( - archive_file, - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - local_files_only=local_files_only, - use_auth_token=use_auth_token, - user_agent=user_agent, + resolved_archive_file = cached_file( + pretrained_model_name_or_path, FLAX_WEIGHTS_INDEX_NAME, **cached_file_kwargs ) - is_sharded = True - except EntryNotFoundError: - has_file_kwargs = {"revision": revision, "proxies": proxies, "use_auth_token": use_auth_token} + if resolved_archive_file is not None: + is_sharded = True + if resolved_archive_file is None: + # Otherwise, maybe there is a TF or Flax model file. We try those to give a helpful error + # message. + has_file_kwargs = { + "revision": revision, + "proxies": proxies, + "use_auth_token": use_auth_token, + } if has_file(pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs): raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named" @@ -735,35 +713,24 @@ def from_pretrained( f"{pretrained_model_name_or_path} does not appear to have a file named" f" {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}." ) - else: + except EnvironmentError: + # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted + # to the original exception. + raise + except Exception: + # For any other exception, we throw a generic error. raise EnvironmentError( - f"{pretrained_model_name_or_path} does not appear to have a file named {filename}." + f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it" + " from 'https://huggingface.co/models', make sure you don't have a local directory with the" + f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a" + f" directory containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}." ) - except HTTPError as err: - raise EnvironmentError( - f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n" - f"{err}" - ) - except ValueError: - raise EnvironmentError( - f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it" - f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a" - f" directory containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}.\nCheckout your" - " internet connection or see how to run the library in offline mode at" - " 'https://huggingface.co/docs/transformers/installation#offline-mode'." - ) - except EnvironmentError: - raise EnvironmentError( - f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from " - "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " - f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " - f"containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}." - ) - - if resolved_archive_file == archive_file: + + if is_local: logger.info(f"loading weights file {archive_file}") + resolved_archive_file = archive_file else: - logger.info(f"loading weights file {archive_file} from cache at {resolved_archive_file}") + logger.info(f"loading weights file {filename} from cache at {resolved_archive_file}") else: resolved_archive_file = None diff --git a/src/transformers/modeling_tf_utils.py b/src/transformers/modeling_tf_utils.py index e1d8f5b7957be5..1a63d32e4196a0 100644 --- a/src/transformers/modeling_tf_utils.py +++ b/src/transformers/modeling_tf_utils.py @@ -37,7 +37,6 @@ from huggingface_hub import Repository, list_repo_files from keras.saving.hdf5_format import save_attributes_to_hdf5_group -from requests import HTTPError from transformers.utils.hub import convert_file_size_to_int, get_checkpoint_shard_files from . import DataCollatorWithPadding, DefaultDataCollator @@ -48,22 +47,16 @@ from .tf_utils import shape_list from .utils import ( DUMMY_INPUTS, - HUGGINGFACE_CO_RESOLVE_ENDPOINT, TF2_WEIGHTS_INDEX_NAME, TF2_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, - EntryNotFoundError, ModelOutput, PushToHubMixin, - RepositoryNotFoundError, - RevisionNotFoundError, - cached_path, + cached_file, find_labels, has_file, - hf_bucket_url, is_offline_mode, - is_remote_url, logging, requires_backends, working_or_temp_dir, @@ -2112,6 +2105,9 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): Mirror source to accelerate downloads in China. If you are from China and have an accessibility problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety. Please refer to the mirror site for more information. + subfolder (`str`, *optional*, defaults to `""`): + In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can + specify the folder name here. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., `output_attentions=True`). Behaves differently depending on whether a `config` is provided or @@ -2164,6 +2160,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): load_weight_prefix = kwargs.pop("load_weight_prefix", None) from_pipeline = kwargs.pop("_from_pipeline", None) from_auto_class = kwargs.pop("_from_auto", False) + subfolder = kwargs.pop("subfolder", "") if trust_remote_code is True: logger.warning( @@ -2202,9 +2199,10 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): # This variable will flag if we're loading a sharded checkpoint. In this case the archive file is just the # index of the files. is_sharded = False - sharded_metadata = None # Load model if pretrained_model_name_or_path is not None: + pretrained_model_name_or_path = str(pretrained_model_name_or_path) + is_local = os.path.isdir(pretrained_model_name_or_path) if os.path.isdir(pretrained_model_name_or_path): if from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)): # Load from a PyTorch checkpoint in priority if from_pt @@ -2232,68 +2230,43 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): f"Error no file named {TF2_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory " f"{pretrained_model_name_or_path}." ) - elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): + elif os.path.isfile(pretrained_model_name_or_path): archive_file = pretrained_model_name_or_path + is_local = True elif os.path.isfile(pretrained_model_name_or_path + ".index"): archive_file = pretrained_model_name_or_path + ".index" + is_local = True else: + # set correct filename filename = WEIGHTS_NAME if from_pt else TF2_WEIGHTS_NAME - archive_file = hf_bucket_url( - pretrained_model_name_or_path, - filename=filename, - revision=revision, - mirror=mirror, - ) - try: - # Load from URL or cache if already cached - resolved_archive_file = cached_path( - archive_file, - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - local_files_only=local_files_only, - use_auth_token=use_auth_token, - user_agent=user_agent, - ) + try: + # Load from URL or cache if already cached + cached_file_kwargs = dict( + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + local_files_only=local_files_only, + use_auth_token=use_auth_token, + user_agent=user_agent, + revision=revision, + subfolder=subfolder, + _raise_exceptions_for_missing_entries=False, + ) + resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs) - except RepositoryNotFoundError: - raise EnvironmentError( - f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier " - "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " - "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " - "login` and pass `use_auth_token=True`." - ) - except RevisionNotFoundError: - raise EnvironmentError( - f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for " - "this model name. Check the model page at " - f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." - ) - except EntryNotFoundError: - if filename == TF2_WEIGHTS_NAME: - try: + # Since we set _raise_exceptions_for_missing_entries=False, we don't get an expection but a None + # result when internet is up, the repo and revision exist, but the file does not. + if resolved_archive_file is None and filename == TF2_WEIGHTS_NAME: # Maybe the checkpoint is sharded, we try to grab the index name in this case. - archive_file = hf_bucket_url( - pretrained_model_name_or_path, - filename=TF2_WEIGHTS_INDEX_NAME, - revision=revision, - mirror=mirror, - ) - resolved_archive_file = cached_path( - archive_file, - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - local_files_only=local_files_only, - use_auth_token=use_auth_token, - user_agent=user_agent, + resolved_archive_file = cached_file( + pretrained_model_name_or_path, TF2_WEIGHTS_INDEX_NAME, **cached_file_kwargs ) - is_sharded = True - except EntryNotFoundError: - # Otherwise, maybe there is a TF or Flax model file. We try those to give a helpful error + if resolved_archive_file is not None: + is_sharded = True + if resolved_archive_file is None: + # Otherwise, maybe there is a PyTorch or Flax model file. We try those to give a helpful error # message. has_file_kwargs = { "revision": revision, @@ -2312,42 +2285,32 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): f"{pretrained_model_name_or_path} does not appear to have a file named" f" {TF2_WEIGHTS_NAME} or {WEIGHTS_NAME}." ) - else: + + except EnvironmentError: + # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted + # to the original exception. + raise + except Exception: + # For any other exception, we throw a generic error. + raise EnvironmentError( - f"{pretrained_model_name_or_path} does not appear to have a file named {filename}." + f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it" + " from 'https://huggingface.co/models', make sure you don't have a local directory with the" + f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a" + f" directory containing a file named {TF2_WEIGHTS_NAME} or {WEIGHTS_NAME}." ) - except HTTPError as err: - raise EnvironmentError( - f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n" - f"{err}" - ) - except ValueError: - raise EnvironmentError( - f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it" - f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a" - f" directory containing a file named {TF2_WEIGHTS_NAME} or {WEIGHTS_NAME}.\nCheckout your internet" - " connection or see how to run the library in offline mode at" - " 'https://huggingface.co/docs/transformers/installation#offline-mode'." - ) - except EnvironmentError: - raise EnvironmentError( - f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from " - "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " - f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " - f"containing a file named {TF2_WEIGHTS_NAME} or {WEIGHTS_NAME}." - ) - - if resolved_archive_file == archive_file: + if is_local: logger.info(f"loading weights file {archive_file}") + resolved_archive_file = archive_file else: - logger.info(f"loading weights file {archive_file} from cache at {resolved_archive_file}") + logger.info(f"loading weights file {filename} from cache at {resolved_archive_file}") else: resolved_archive_file = None # We'll need to download and cache each checkpoint shard if the checkpoint is sharded. if is_sharded: # resolved_archive_file becomes a list of files that point to the different checkpoint shards in this case. - resolved_archive_file, sharded_metadata = get_checkpoint_shard_files( + resolved_archive_file, _ = get_checkpoint_shard_files( pretrained_model_name_or_path, resolved_archive_file, cache_dir=cache_dir, diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index 5cd458d1f9d587..8709ec66365c66 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -31,7 +31,6 @@ from torch import Tensor, device, nn from torch.nn import CrossEntropyLoss -from requests import HTTPError from transformers.utils.hub import convert_file_size_to_int, get_checkpoint_shard_files from transformers.utils.import_utils import is_sagemaker_mp_enabled @@ -51,24 +50,18 @@ from .utils import ( DUMMY_INPUTS, FLAX_WEIGHTS_NAME, - HUGGINGFACE_CO_RESOLVE_ENDPOINT, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, - EntryNotFoundError, ModelOutput, PushToHubMixin, - RepositoryNotFoundError, - RevisionNotFoundError, - cached_path, + cached_file, copy_func, has_file, - hf_bucket_url, is_accelerate_available, is_offline_mode, - is_remote_url, logging, replace_return_docstrings, ) @@ -1868,7 +1861,8 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P if pretrained_model_name_or_path is not None: pretrained_model_name_or_path = str(pretrained_model_name_or_path) - if os.path.isdir(pretrained_model_name_or_path): + is_local = os.path.isdir(pretrained_model_name_or_path) + if is_local: if from_tf and os.path.isfile( os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + ".index") ): @@ -1911,10 +1905,9 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P f"Error no file named {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME + '.index'} or " f"{FLAX_WEIGHTS_NAME} found in directory {pretrained_model_name_or_path}." ) - elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)) or is_remote_url( - pretrained_model_name_or_path - ): + elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)): archive_file = pretrained_model_name_or_path + is_local = True elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path + ".index")): if not from_tf: raise ValueError( @@ -1922,6 +1915,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P "from_tf to True to load from this checkpoint." ) archive_file = os.path.join(subfolder, pretrained_model_name_or_path + ".index") + is_local = True else: # set correct filename if from_tf: @@ -1931,63 +1925,32 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P else: filename = WEIGHTS_NAME - archive_file = hf_bucket_url( - pretrained_model_name_or_path, - filename=filename, - revision=revision, - mirror=mirror, - subfolder=subfolder if len(subfolder) > 0 else None, - ) - - try: - # Load from URL or cache if already cached - resolved_archive_file = cached_path( - archive_file, - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - local_files_only=local_files_only, - use_auth_token=use_auth_token, - user_agent=user_agent, - ) + try: + # Load from URL or cache if already cached + cached_file_kwargs = dict( + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + local_files_only=local_files_only, + use_auth_token=use_auth_token, + user_agent=user_agent, + revision=revision, + subfolder=subfolder, + _raise_exceptions_for_missing_entries=False, + ) + resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs) - except RepositoryNotFoundError: - raise EnvironmentError( - f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier " - "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " - "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " - "login` and pass `use_auth_token=True`." - ) - except RevisionNotFoundError: - raise EnvironmentError( - f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for " - "this model name. Check the model page at " - f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." - ) - except EntryNotFoundError: - if filename == WEIGHTS_NAME: - try: + # Since we set _raise_exceptions_for_missing_entries=False, we don't get an expection but a None + # result when internet is up, the repo and revision exist, but the file does not. + if resolved_archive_file is None and filename == WEIGHTS_NAME: # Maybe the checkpoint is sharded, we try to grab the index name in this case. - archive_file = hf_bucket_url( - pretrained_model_name_or_path, - filename=WEIGHTS_INDEX_NAME, - revision=revision, - mirror=mirror, - subfolder=subfolder if len(subfolder) > 0 else None, - ) - resolved_archive_file = cached_path( - archive_file, - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - local_files_only=local_files_only, - use_auth_token=use_auth_token, - user_agent=user_agent, + resolved_archive_file = cached_file( + pretrained_model_name_or_path, WEIGHTS_INDEX_NAME, **cached_file_kwargs ) - is_sharded = True - except EntryNotFoundError: + if resolved_archive_file is not None: + is_sharded = True + if resolved_archive_file is None: # Otherwise, maybe there is a TF or Flax model file. We try those to give a helpful error # message. has_file_kwargs = { @@ -2013,42 +1976,31 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P f"{pretrained_model_name_or_path} does not appear to have a file named {WEIGHTS_NAME}," f" {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME} or {FLAX_WEIGHTS_NAME}." ) - else: + except EnvironmentError: + # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted + # to the original exception. + raise + except Exception: + # For any other exception, we throw a generic error. raise EnvironmentError( - f"{pretrained_model_name_or_path} does not appear to have a file named {filename}." + f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it" + " from 'https://huggingface.co/models', make sure you don't have a local directory with the" + f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a" + f" directory containing a file named {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME} or" + f" {FLAX_WEIGHTS_NAME}." ) - except HTTPError as err: - raise EnvironmentError( - f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n" - f"{err}" - ) - except ValueError: - raise EnvironmentError( - f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it" - f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a" - f" directory containing a file named {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME} or" - f" {FLAX_WEIGHTS_NAME}.\nCheckout your internet connection or see how to run the library in" - " offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'." - ) - except EnvironmentError: - raise EnvironmentError( - f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from " - "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " - f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory " - f"containing a file named {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME} or " - f"{FLAX_WEIGHTS_NAME}." - ) - if resolved_archive_file == archive_file: + if is_local: logger.info(f"loading weights file {archive_file}") + resolved_archive_file = archive_file else: - logger.info(f"loading weights file {archive_file} from cache at {resolved_archive_file}") + logger.info(f"loading weights file {filename} from cache at {resolved_archive_file}") else: resolved_archive_file = None # We'll need to download and cache each checkpoint shard if the checkpoint is sharded. if is_sharded: - # resolved_archive_file becomes a list of files that point to the different checkpoint shards in this case. + # rsolved_archive_file becomes a list of files that point to the different checkpoint shards in this case. resolved_archive_file, sharded_metadata = get_checkpoint_shard_files( pretrained_model_name_or_path, resolved_archive_file, diff --git a/src/transformers/tokenization_utils_base.py b/src/transformers/tokenization_utils_base.py index 8d24baf05bdb86..fc1c0ff8da3b32 100644 --- a/src/transformers/tokenization_utils_base.py +++ b/src/transformers/tokenization_utils_base.py @@ -35,21 +35,16 @@ from . import __version__ from .dynamic_module_utils import custom_object_save from .utils import ( - EntryNotFoundError, ExplicitEnum, PaddingStrategy, PushToHubMixin, - RepositoryNotFoundError, - RevisionNotFoundError, TensorType, add_end_docstrings, - cached_path, + cached_file, copy_func, get_file_from_repo, - hf_bucket_url, is_flax_available, is_offline_mode, - is_remote_url, is_tf_available, is_tokenizers_available, is_torch_available, @@ -1669,7 +1664,8 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], vocab_files = {} init_configuration = {} - if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): + is_local = os.path.isdir(pretrained_model_name_or_path) + if os.path.isfile(pretrained_model_name_or_path): if len(cls.vocab_files_names) > 1: raise ValueError( f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is not " @@ -1689,9 +1685,9 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], "special_tokens_map_file": SPECIAL_TOKENS_MAP_FILE, "tokenizer_config_file": TOKENIZER_CONFIG_FILE, } - vocab_files_target = {**cls.vocab_files_names, **additional_files_names} + vocab_files = {**cls.vocab_files_names, **additional_files_names} - if "tokenizer_file" in vocab_files_target: + if "tokenizer_file" in vocab_files: # Try to get the tokenizer config to see if there are versioned tokenizer files. fast_tokenizer_file = FULL_TOKENIZER_FILE resolved_config_file = get_file_from_repo( @@ -1704,80 +1700,38 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, + subfolder=subfolder, ) if resolved_config_file is not None: with open(resolved_config_file, encoding="utf-8") as reader: tokenizer_config = json.load(reader) if "fast_tokenizer_files" in tokenizer_config: fast_tokenizer_file = get_fast_tokenizer_file(tokenizer_config["fast_tokenizer_files"]) - vocab_files_target["tokenizer_file"] = fast_tokenizer_file - - # Look for the tokenizer files - for file_id, file_name in vocab_files_target.items(): - if os.path.isdir(pretrained_model_name_or_path): - if subfolder is not None: - full_file_name = os.path.join(pretrained_model_name_or_path, subfolder, file_name) - else: - full_file_name = os.path.join(pretrained_model_name_or_path, file_name) - if not os.path.exists(full_file_name): - logger.info(f"Didn't find file {full_file_name}. We won't load it.") - full_file_name = None - else: - full_file_name = hf_bucket_url( - pretrained_model_name_or_path, - filename=file_name, - subfolder=subfolder, - revision=revision, - mirror=None, - ) - - vocab_files[file_id] = full_file_name + vocab_files["tokenizer_file"] = fast_tokenizer_file # Get files from url, cache, or disk depending on the case resolved_vocab_files = {} unresolved_files = [] for file_id, file_path in vocab_files.items(): + print(file_id, file_path) if file_path is None: resolved_vocab_files[file_id] = None else: - try: - resolved_vocab_files[file_id] = cached_path( - file_path, - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - local_files_only=local_files_only, - use_auth_token=use_auth_token, - user_agent=user_agent, - ) - - except FileNotFoundError as error: - if local_files_only: - unresolved_files.append(file_id) - else: - raise error - - except RepositoryNotFoundError: - raise EnvironmentError( - f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier " - "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to " - "pass a token having permission to this repo with `use_auth_token` or log in with " - "`huggingface-cli login` and pass `use_auth_token=True`." - ) - except RevisionNotFoundError: - raise EnvironmentError( - f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists " - "for this model name. Check the model page at " - f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions." - ) - except EntryNotFoundError: - logger.debug(f"{pretrained_model_name_or_path} does not contain a file named {file_path}.") - resolved_vocab_files[file_id] = None - - except ValueError: - logger.debug(f"Connection problem to access {file_path} and it wasn't found in the cache.") - resolved_vocab_files[file_id] = None + resolved_vocab_files[file_id] = cached_file( + pretrained_model_name_or_path, + file_path, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + local_files_only=local_files_only, + use_auth_token=use_auth_token, + user_agent=user_agent, + revision=revision, + subfolder=subfolder, + _raise_exceptions_for_missing_entries=False, + _raise_exceptions_for_connection_errors=False, + ) if len(unresolved_files) > 0: logger.info( @@ -1797,7 +1751,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], if file_id not in resolved_vocab_files: continue - if file_path == resolved_vocab_files[file_id]: + if is_local: logger.info(f"loading file {file_path}") else: logger.info(f"loading file {file_path} from cache at {resolved_vocab_files[file_id]}") diff --git a/src/transformers/utils/__init__.py b/src/transformers/utils/__init__.py index 377932e2d490e7..023dffc27a703b 100644 --- a/src/transformers/utils/__init__.py +++ b/src/transformers/utils/__init__.py @@ -60,6 +60,7 @@ PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, + cached_file, cached_path, default_cache_path, define_sagemaker_information, @@ -76,6 +77,7 @@ is_local_clone, is_offline_mode, is_remote_url, + move_cache, send_example_telemetry, url_to_filename, ) diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py index 1fd22d7a7cb70f..9e81654cda7e1a 100644 --- a/src/transformers/utils/hub.py +++ b/src/transformers/utils/hub.py @@ -19,11 +19,13 @@ import io import json import os +import re import shutil import subprocess import sys import tarfile import tempfile +import traceback import warnings from contextlib import contextmanager from functools import partial @@ -34,9 +36,20 @@ from uuid import uuid4 from zipfile import ZipFile, is_zipfile +import huggingface_hub import requests from filelock import FileLock -from huggingface_hub import CommitOperationAdd, HfFolder, create_commit, create_repo, list_repo_files, whoami +from huggingface_hub import ( + CommitOperationAdd, + HfFolder, + create_commit, + create_repo, + hf_hub_download, + list_repo_files, + whoami, +) +from huggingface_hub.constants import HUGGINGFACE_HEADER_X_LINKED_ETAG, HUGGINGFACE_HEADER_X_REPO_COMMIT +from huggingface_hub.utils import EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError from requests.exceptions import HTTPError from requests.models import Response from transformers.utils.logging import tqdm @@ -385,21 +398,6 @@ def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str: return ua -class RepositoryNotFoundError(HTTPError): - """ - Raised when trying to access a hf.co URL with an invalid repository name, or with a private repo name the user does - not have access to. - """ - - -class EntryNotFoundError(HTTPError): - """Raised when trying to access a hf.co URL with a valid repository and revision but an invalid filename.""" - - -class RevisionNotFoundError(HTTPError): - """Raised when trying to access a hf.co URL with a valid repository but an invalid revision.""" - - def _raise_for_status(response: Response): """ Internal version of `request.raise_for_status()` that will refine a potential HTTPError. @@ -628,8 +626,58 @@ def _resumable_file_manager() -> "io.BufferedWriter": return cache_path -def get_file_from_repo( - path_or_repo: Union[str, os.PathLike], +def try_to_load_from_cache(cache_dir, repo_id, filename, revision=None): + """ + Explores the cache to return the latest cached file for a given revision. + """ + if revision is None: + revision = "main" + + model_id = repo_id.replace("/", "--") + model_cache = os.path.join(cache_dir, f"models--{model_id}") + if not os.path.isdir(model_cache): + # No cache for this model + return None + + # Resolve refs (for instance to convert main to the associated commit sha) + cached_refs = os.listdir(os.path.join(model_cache, "refs")) + if revision in cached_refs: + with open(os.path.join(model_cache, "refs", revision)) as f: + revision = f.read() + + cached_shas = os.listdir(os.path.join(model_cache, "snapshots")) + if revision not in cached_shas: + # No cache for this revision and we won't try to return a random revision + return None + + cached_file = os.path.join(model_cache, "snapshots", revision, filename) + return cached_file if os.path.isfile(cached_file) else None + + +# If huggingface_hub changes the class of error for this to FileNotFoundError, we will be able to avoid that in the +# future. +LOCAL_FILES_ONLY_HF_ERROR = ( + "Cannot find the requested files in the disk cache and outgoing traffic has been disabled. To enable hf.co " + "look-ups and downloads online, set 'local_files_only' to False." +) + + +# In the future, this ugly contextmanager can be removed when huggingface_hub as a released version where we can +# activate/deactivate progress bars. +@contextmanager +def _patch_hf_hub_tqdm(): + """ + A context manager to make huggingface hub use the tqdm version of Transformers (which is controlled by some utils) + in logging. + """ + old_tqdm = huggingface_hub.file_download.tqdm + huggingface_hub.file_download.tqdm = tqdm + yield + huggingface_hub.file_download.tqdm = old_tqdm + + +def cached_file( + path_or_repo_id: Union[str, os.PathLike], filename: str, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, @@ -638,12 +686,16 @@ def get_file_from_repo( use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, + subfolder: str = "", + user_agent: Optional[Union[str, Dict[str, str]]] = None, + _raise_exceptions_for_missing_entries=True, + _raise_exceptions_for_connection_errors=True, ): """ Tries to locate a file in a local folder and repo, downloads and cache it if necessary. Args: - path_or_repo (`str` or `os.PathLike`): + path_or_repo_id (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a model repo on huggingface.co. @@ -670,6 +722,9 @@ def get_file_from_repo( identifier allowed by git. local_files_only (`bool`, *optional*, defaults to `False`): If `True`, will only try to load the tokenizer configuration from local files. + subfolder (`str`, *optional*, defaults to `""`): + In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can + specify the folder name here. @@ -678,43 +733,56 @@ def get_file_from_repo( Returns: - `Optional[str]`: Returns the resolved file (to the cache folder if downloaded from a repo) or `None` if the - file does not exist. + `Optional[str]`: Returns the resolved file (to the cache folder if downloaded from a repo). Examples: ```python - # Download a tokenizer configuration from huggingface.co and cache. - tokenizer_config = get_file_from_repo("bert-base-uncased", "tokenizer_config.json") - # This model does not have a tokenizer config so the result will be None. - tokenizer_config = get_file_from_repo("xlm-roberta-base", "tokenizer_config.json") + # Download a model weight from the Hub and cache it. + model_weights_file = cached_file("bert-base-uncased", "pytorch_model.bin") ```""" if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True + if subfolder is None: + subfolder = "" + + path_or_repo_id = str(path_or_repo_id) + full_filename = os.path.join(subfolder, filename) + if os.path.isdir(path_or_repo_id): + resolved_file = os.path.join(os.path.join(path_or_repo_id, subfolder), filename) + if not os.path.isfile(resolved_file): + if _raise_exceptions_for_missing_entries: + raise EnvironmentError(f"Could not locate {full_filename} inside {path_or_repo_id}.") + else: + return None + return resolved_file - path_or_repo = str(path_or_repo) - if os.path.isdir(path_or_repo): - resolved_file = os.path.join(path_or_repo, filename) - return resolved_file if os.path.isfile(resolved_file) else None - else: - resolved_file = hf_bucket_url(path_or_repo, filename=filename, revision=revision, mirror=None) - + if cache_dir is None: + cache_dir = TRANSFORMERS_CACHE + if isinstance(cache_dir, Path): + cache_dir = str(cache_dir) + user_agent = http_user_agent(user_agent) try: # Load from URL or cache if already cached - resolved_file = cached_path( - resolved_file, - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - local_files_only=local_files_only, - use_auth_token=use_auth_token, - ) + with _patch_hf_hub_tqdm(): + resolved_file = hf_hub_download( + path_or_repo_id, + filename, + subfolder=None if len(subfolder) == 0 else subfolder, + revision=revision, + cache_dir=cache_dir, + user_agent=user_agent, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + use_auth_token=use_auth_token, + local_files_only=local_files_only, + ) except RepositoryNotFoundError: raise EnvironmentError( - f"{path_or_repo} is not a local folder and is not a valid model identifier " + f"{path_or_repo_id} is not a local folder and is not a valid model identifier " "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to " "pass a token having permission to this repo with `use_auth_token` or log in with " "`huggingface-cli login` and pass `use_auth_token=True`." @@ -723,15 +791,129 @@ def get_file_from_repo( raise EnvironmentError( f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists " "for this model name. Check the model page at " - f"'https://huggingface.co/{path_or_repo}' for available revisions." + f"'https://huggingface.co/{path_or_repo_id}' for available revisions." + ) + except EntryNotFoundError: + if not _raise_exceptions_for_missing_entries: + return None + if revision is None: + revision = "main" + raise EnvironmentError( + f"{path_or_repo_id} does not appear to have a file named {full_filename}. Checkout " + f"'https://huggingface.co/{path_or_repo_id}/{revision}' for available files." + ) + except HTTPError as err: + # First we try to see if we have a cached version (not up to date): + resolved_file = try_to_load_from_cache(cache_dir, path_or_repo_id, full_filename, revision=revision) + if resolved_file is not None: + return resolved_file + if not _raise_exceptions_for_connection_errors: + return None + + raise EnvironmentError(f"There was a specific connection error when trying to load {path_or_repo_id}:\n{err}") + except ValueError as err: + # HuggingFace Hub returns a ValueError for a missing file when local_files_only=True we need to catch it here + # This could be caught above along in `EntryNotFoundError` if hf_hub sent a different error message here + if LOCAL_FILES_ONLY_HF_ERROR in err.args[0] and local_files_only and not _raise_exceptions_for_missing_entries: + return None + + # Otherwise we try to see if we have a cached version (not up to date): + resolved_file = try_to_load_from_cache(cache_dir, path_or_repo_id, full_filename, revision=revision) + if resolved_file is not None: + return resolved_file + if not _raise_exceptions_for_connection_errors: + return None + raise EnvironmentError( + f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this file, couldn't find it in the" + f" cached files and it looks like {path_or_repo_id} is not the path to a directory containing a file named" + f" {full_filename}.\nCheckout your internet connection or see how to run the library in offline mode at" + " 'https://huggingface.co/docs/transformers/installation#offline-mode'." ) - except EnvironmentError: - # The repo and revision exist, but the file does not or there was a connection error fetching it. - return None return resolved_file +def get_file_from_repo( + path_or_repo: Union[str, os.PathLike], + filename: str, + cache_dir: Optional[Union[str, os.PathLike]] = None, + force_download: bool = False, + resume_download: bool = False, + proxies: Optional[Dict[str, str]] = None, + use_auth_token: Optional[Union[bool, str]] = None, + revision: Optional[str] = None, + local_files_only: bool = False, + subfolder: str = "", +): + """ + Tries to locate a file in a local folder and repo, downloads and cache it if necessary. + + Args: + path_or_repo (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a model repo on huggingface.co. + - a path to a *directory* potentially containing the file. + filename (`str`): + The name of the file to locate in `path_or_repo`. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the standard + cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force to (re-)download the configuration files and override the cached versions if they + exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + use_auth_token (`str` or *bool*, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated + when running `transformers-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + local_files_only (`bool`, *optional*, defaults to `False`): + If `True`, will only try to load the tokenizer configuration from local files. + subfolder (`str`, *optional*, defaults to `""`): + In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can + specify the folder name here. + + + + Passing `use_auth_token=True` is required when you want to use a private model. + + + + Returns: + `Optional[str]`: Returns the resolved file (to the cache folder if downloaded from a repo) or `None` if the + file does not exist. + + Examples: + + ```python + # Download a tokenizer configuration from huggingface.co and cache. + tokenizer_config = get_file_from_repo("bert-base-uncased", "tokenizer_config.json") + # This model does not have a tokenizer config so the result will be None. + tokenizer_config = get_file_from_repo("xlm-roberta-base", "tokenizer_config.json") + ```""" + return cached_file( + path_or_repo_id=path_or_repo, + filename=filename, + cache_dir=cache_dir, + force_download=force_download, + resume_download=resume_download, + proxies=proxies, + use_auth_token=use_auth_token, + revision=revision, + local_files_only=local_files_only, + subfolder=subfolder, + _raise_exceptions_for_missing_entries=False, + _raise_exceptions_for_connection_errors=False, + ) + + def has_file( path_or_repo: Union[str, os.PathLike], filename: str, @@ -766,7 +948,7 @@ def has_file( r = requests.head(url, headers=headers, allow_redirects=False, proxies=proxies, timeout=10) try: - _raise_for_status(r) + huggingface_hub.utils._errors._raise_for_status(r) return True except RepositoryNotFoundError as e: logger.error(e) @@ -1196,3 +1378,183 @@ def get_checkpoint_shard_files( cached_filenames.append(cached_filename) return cached_filenames, sharded_metadata + + +# All what is below is for conversion between old cache format and new cache format. + + +def get_all_cached_files(cache_dir=None): + """ + Returns a list for all files cached with appropriate metadata. + """ + if cache_dir is None: + cache_dir = TRANSFORMERS_CACHE + else: + cache_dir = str(cache_dir) + + cached_files = [] + for file in os.listdir(cache_dir): + meta_path = os.path.join(cache_dir, f"{file}.json") + if not os.path.isfile(meta_path): + continue + + with open(meta_path, encoding="utf-8") as meta_file: + metadata = json.load(meta_file) + url = metadata["url"] + etag = metadata["etag"].replace('"', "") + cached_files.append({"file": file, "url": url, "etag": etag}) + + return cached_files + + +def get_hub_metadata(url, token=None): + """ + Returns the commit hash and associated etag for a given url. + """ + if token is None: + token = HfFolder.get_token() + headers = {"user-agent": http_user_agent()} + headers["authorization"] = f"Bearer {token}" + + r = huggingface_hub.file_download._request_with_retry( + method="HEAD", url=url, headers=headers, allow_redirects=False + ) + huggingface_hub.file_download._raise_for_status(r) + commit_hash = r.headers.get(HUGGINGFACE_HEADER_X_REPO_COMMIT) + etag = r.headers.get(HUGGINGFACE_HEADER_X_LINKED_ETAG) or r.headers.get("ETag") + if etag is not None: + etag = huggingface_hub.file_download._normalize_etag(etag) + return etag, commit_hash + + +def extract_info_from_url(url): + """ + Extract repo_name, revision and filename from an url. + """ + search = re.search(r"^https://huggingface\.co/(.*)/resolve/([^/]*)/(.*)$", url) + if search is None: + return None + repo, revision, filename = search.groups() + cache_repo = "--".join(["models"] + repo.split("/")) + return {"repo": cache_repo, "revision": revision, "filename": filename} + + +def clean_files_for(file): + """ + Remove, if they exist, file, file.json and file.lock + """ + for f in [file, f"{file}.json", f"{file}.lock"]: + if os.path.isfile(f): + os.remove(f) + + +def move_to_new_cache(file, repo, filename, revision, etag, commit_hash): + """ + Move file to repo following the new huggingface hub cache organization. + """ + os.makedirs(repo, exist_ok=True) + + # refs + os.makedirs(os.path.join(repo, "refs"), exist_ok=True) + if revision != commit_hash: + ref_path = os.path.join(repo, "refs", revision) + with open(ref_path, "w") as f: + f.write(commit_hash) + + # blobs + os.makedirs(os.path.join(repo, "blobs"), exist_ok=True) + # TODO: replace copy by move when all works well. + blob_path = os.path.join(repo, "blobs", etag) + shutil.move(file, blob_path) + + # snapshots + os.makedirs(os.path.join(repo, "snapshots"), exist_ok=True) + os.makedirs(os.path.join(repo, "snapshots", commit_hash), exist_ok=True) + pointer_path = os.path.join(repo, "snapshots", commit_hash, filename) + huggingface_hub.file_download._create_relative_symlink(blob_path, pointer_path) + clean_files_for(file) + + +def move_cache(cache_dir=None, token=None): + if cache_dir is None: + cache_dir = TRANSFORMERS_CACHE + if token is None: + token = HfFolder.get_token() + cached_files = get_all_cached_files(cache_dir=cache_dir) + print(f"Moving {len(cached_files)} files to the new cache system") + + hub_metadata = {} + for file_info in tqdm(cached_files): + url = file_info.pop("url") + if url not in hub_metadata: + try: + hub_metadata[url] = get_hub_metadata(url, token=token) + except requests.HTTPError: + continue + + etag, commit_hash = hub_metadata[url] + if etag is None or commit_hash is None: + continue + + if file_info["etag"] != etag: + # Cached file is not up to date, we just throw it as a new version will be downloaded anyway. + clean_files_for(os.path.join(cache_dir, file_info["file"])) + continue + + url_info = extract_info_from_url(url) + if url_info is None: + # Not a file from huggingface.co + continue + + repo = os.path.join(cache_dir, url_info["repo"]) + move_to_new_cache( + file=os.path.join(cache_dir, file_info["file"]), + repo=repo, + filename=url_info["filename"], + revision=url_info["revision"], + etag=etag, + commit_hash=commit_hash, + ) + + +cache_version_file = os.path.join(TRANSFORMERS_CACHE, "version.txt") +if not os.path.isfile(cache_version_file): + cache_version = 0 +else: + with open(cache_version_file) as f: + cache_version = int(f.read()) + + +if cache_version < 1: + if is_offline_mode(): + logger.warn( + "You are offline and the cache for model files in Transformers v4.22.0 has been updated while your local " + "cache seems to be the one of a previous version. It is very likely that all your calls to any " + "`from_pretrained()` method will fail. Remove the offline mode and enable internet connection to have " + "your cache be updated automatically, then you can go back to offline mode." + ) + else: + logger.warn( + "The cache for model files in Transformers v4.22.0 has been udpated. Migrating your old cache. This is a " + "one-time only operation. You can interrupt this and resume the migration later on by calling " + "`transformers.utils.move_cache()`." + ) + try: + move_cache() + except Exception as e: + trace = "\n".join(traceback.format_tb(e.__traceback__)) + logger.error( + f"There was a problem when trying to move your cache:\n\n{trace}\n\nPlease file an issue at " + "https://github.com/huggingface/transformers/issues/new/choose and copy paste this whole message and we " + "will do our best to help." + ) + + try: + os.makedirs(TRANSFORMERS_CACHE, exist_ok=True) + with open(cache_version_file, "w") as f: + f.write("1") + except Exception: + logger.warn( + f"There was a problem when trying to write in your cache folder ({TRANSFORMERS_CACHE}). You should set " + "the environment variable TRANSFORMERS_CACHE to a writable directory." + ) diff --git a/tests/test_configuration_common.py b/tests/test_configuration_common.py index b6c8ed77dc3571..397346c7deec77 100644 --- a/tests/test_configuration_common.py +++ b/tests/test_configuration_common.py @@ -345,14 +345,14 @@ def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 - response_mock.headers = [] + response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError # Download this model to make sure it's in the cache. _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") # Under the mock environment we get a 500 error when trying to reach the model. - with mock.patch("transformers.utils.hub.requests.head", return_value=response_mock) as mock_head: + with mock.patch("requests.request", return_value=response_mock) as mock_head: _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") # This check we did call the fake head request mock_head.assert_called() diff --git a/tests/test_feature_extraction_common.py b/tests/test_feature_extraction_common.py index a822b75cc5eb62..3ecf89a908672f 100644 --- a/tests/test_feature_extraction_common.py +++ b/tests/test_feature_extraction_common.py @@ -170,13 +170,13 @@ def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 - response_mock.headers = [] + response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError # Download this model to make sure it's in the cache. _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2") # Under the mock environment we get a 500 error when trying to reach the model. - with mock.patch("transformers.utils.hub.requests.head", return_value=response_mock) as mock_head: + with mock.patch("requests.request", return_value=response_mock) as mock_head: _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2") # This check we did call the fake head request mock_head.assert_called() diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index c05771336e6365..8f80d7fa42f791 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -2925,14 +2925,14 @@ def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 - response_mock.headers = [] + response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError # Download this model to make sure it's in the cache. _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # Under the mock environment we get a 500 error when trying to reach the model. - with mock.patch("transformers.utils.hub.requests.head", return_value=response_mock) as mock_head: + with mock.patch("requests.request", return_value=response_mock) as mock_head: _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # This check we did call the fake head request mock_head.assert_called() diff --git a/tests/test_modeling_tf_common.py b/tests/test_modeling_tf_common.py index 15855e6a1f40e6..abf26af2b65116 100644 --- a/tests/test_modeling_tf_common.py +++ b/tests/test_modeling_tf_common.py @@ -1922,14 +1922,14 @@ def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 - response_mock.headers = [] + response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError # Download this model to make sure it's in the cache. _ = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # Under the mock environment we get a 500 error when trying to reach the model. - with mock.patch("transformers.utils.hub.requests.head", return_value=response_mock) as mock_head: + with mock.patch("requests.request", return_value=response_mock) as mock_head: _ = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") # This check we did call the fake head request mock_head.assert_called() diff --git a/tests/test_tokenization_common.py b/tests/test_tokenization_common.py index e1ed8530fdbdea..5941a571189960 100644 --- a/tests/test_tokenization_common.py +++ b/tests/test_tokenization_common.py @@ -3829,14 +3829,14 @@ def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 - response_mock.headers = [] + response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError # Download this model to make sure it's in the cache. _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") # Under the mock environment we get a 500 error when trying to reach the model. - with mock.patch("transformers.utils.hub.requests.head", return_value=response_mock) as mock_head: + with mock.patch("requests.request", return_value=response_mock) as mock_head: _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") # This check we did call the fake head request mock_head.assert_called() From 280db2e39c1e586389df4e46f2b895fc092911bb Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Fri, 5 Aug 2022 16:49:54 +0200 Subject: [PATCH 025/539] Fix `test_dbmdz_english` by updating expected values (#18482) Co-authored-by: ydshieh --- .../test_pipelines_token_classification.py | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/tests/pipelines/test_pipelines_token_classification.py b/tests/pipelines/test_pipelines_token_classification.py index 1d71529cdfee6b..bc4eaef06255e3 100644 --- a/tests/pipelines/test_pipelines_token_classification.py +++ b/tests/pipelines/test_pipelines_token_classification.py @@ -284,9 +284,9 @@ def test_dbmdz_english(self): self.assertEqual( nested_simplify(output), [ - {"entity": "I-PER", "score": 0.997, "word": "En", "start": 0, "end": 2, "index": 1}, - {"entity": "I-PER", "score": 0.996, "word": "##zo", "start": 2, "end": 4, "index": 2}, - {"entity": "I-ORG", "score": 0.999, "word": "UN", "start": 22, "end": 24, "index": 7}, + {"entity": "I-PER", "score": 0.998, "word": "En", "start": 0, "end": 2, "index": 1}, + {"entity": "I-PER", "score": 0.997, "word": "##zo", "start": 2, "end": 4, "index": 2}, + {"entity": "I-ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20, "index": 6}, ], ) @@ -295,8 +295,8 @@ def test_dbmdz_english(self): self.assertEqual( nested_simplify(output), [ - {"entity_group": "PER", "score": 0.996, "word": "Enzo", "start": 0, "end": 4}, - {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 22, "end": 24}, + {"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4}, + {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20}, ], ) @@ -305,8 +305,8 @@ def test_dbmdz_english(self): self.assertEqual( nested_simplify(output[:3]), [ - {"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4}, - {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 22, "end": 24}, + {"entity_group": "PER", "score": 0.998, "word": "Enzo", "start": 0, "end": 4}, + {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20}, ], ) @@ -315,8 +315,8 @@ def test_dbmdz_english(self): self.assertEqual( nested_simplify(output[:3]), [ - {"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4}, - {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 22, "end": 24}, + {"entity_group": "PER", "score": 0.998, "word": "Enzo", "start": 0, "end": 4}, + {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20}, ], ) @@ -325,8 +325,8 @@ def test_dbmdz_english(self): self.assertEqual( nested_simplify(output), [ - {"entity_group": "PER", "score": 0.996, "word": "Enzo", "start": 0, "end": 4}, - {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 22, "end": 24}, + {"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4}, + {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20}, ], ) From faacdf007b23feba7e6735aff1e8b97a08a17d42 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Fri, 5 Aug 2022 13:14:00 -0400 Subject: [PATCH 026/539] Move cache folder to huggingface/hub for consistency with hf_hub (#18492) * Move cache folder to just huggingface * Thank you VsCode for this needless import * Move to hub * Forgot one --- docs/source/en/installation.mdx | 8 ++++---- src/transformers/utils/hub.py | 16 ++++++++++++---- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/docs/source/en/installation.mdx b/docs/source/en/installation.mdx index f20490115842c3..4ff4e04436c74e 100644 --- a/docs/source/en/installation.mdx +++ b/docs/source/en/installation.mdx @@ -139,11 +139,11 @@ conda install -c huggingface transformers ## Cache setup -Pretrained models are downloaded and locally cached at: `~/.cache/huggingface/transformers/`. This is the default directory given by the shell environment variable `TRANSFORMERS_CACHE`. On Windows, the default directory is given by `C:\Users\username\.cache\huggingface\transformers`. You can change the shell environment variables shown below - in order of priority - to specify a different cache directory: +Pretrained models are downloaded and locally cached at: `~/.cache/huggingface/hub`. This is the default directory given by the shell environment variable `TRANSFORMERS_CACHE`. On Windows, the default directory is given by `C:\Users\username\.cache\huggingface\hub`. You can change the shell environment variables shown below - in order of priority - to specify a different cache directory: -1. Shell environment variable (default): `TRANSFORMERS_CACHE`. -2. Shell environment variable: `HF_HOME` + `transformers/`. -3. Shell environment variable: `XDG_CACHE_HOME` + `/huggingface/transformers`. +1. Shell environment variable (default): `HUGGINGFACE_HUB_CACHE` or `TRANSFORMERS_CACHE`. +2. Shell environment variable: `HF_HOME`. +3. Shell environment variable: `XDG_CACHE_HOME` + `/huggingface`. diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py index 9e81654cda7e1a..7fa4c0a151ace4 100644 --- a/src/transformers/utils/hub.py +++ b/src/transformers/utils/hub.py @@ -81,7 +81,7 @@ def is_offline_mode(): hf_cache_home = os.path.expanduser( os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface")) ) -default_cache_path = os.path.join(hf_cache_home, "transformers") +default_cache_path = os.path.join(hf_cache_home, "hub") # Onetime move from the old location to the new one if no ENV variable has been set. if ( @@ -102,7 +102,8 @@ def is_offline_mode(): PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path) PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE) -TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE) +HUGGINGFACE_HUB_CACHE = os.getenv("HUGGINGFACE_HUB_CACHE", PYTORCH_TRANSFORMERS_CACHE) +TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", HUGGINGFACE_HUB_CACHE) HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules")) TRANSFORMERS_DYNAMIC_MODULE_NAME = "transformers_modules" SESSION_ID = uuid4().hex @@ -1475,9 +1476,16 @@ def move_to_new_cache(file, repo, filename, revision, etag, commit_hash): clean_files_for(file) -def move_cache(cache_dir=None, token=None): +def move_cache(cache_dir=None, new_cache_dir=None, token=None): + if new_cache_dir is None: + new_cache_dir = TRANSFORMERS_CACHE if cache_dir is None: - cache_dir = TRANSFORMERS_CACHE + # Migrate from old cache in .cache/huggingface/hub + old_cache = Path(TRANSFORMERS_CACHE).parent / "transformers" + if os.path.isdir(str(old_cache)): + cache_dir = str(old_cache) + else: + cache_dir = new_cache_dir if token is None: token = HfFolder.get_token() cached_files = get_all_cached_files(cache_dir=cache_dir) From 9d64f7f00c7904c1e8eaccea0d18b7434f2d9cbc Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Fri, 5 Aug 2022 19:17:51 +0200 Subject: [PATCH 027/539] Update some expected values in `quicktour.mdx` for `resampy 0.3.0` (#18484) Co-authored-by: ydshieh --- docs/source/en/quicktour.mdx | 2 +- docs/source/es/quicktour.mdx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/en/quicktour.mdx b/docs/source/en/quicktour.mdx index dcadf6da34884f..c5b333bf5694fe 100644 --- a/docs/source/en/quicktour.mdx +++ b/docs/source/en/quicktour.mdx @@ -136,7 +136,7 @@ Let's extract the raw waveform arrays of the first 4 samples and pass it as a li ```py >>> result = speech_recognizer(dataset[:4]["audio"]) >>> print([d["text"] for d in result]) -['I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT', "FONDERING HOW I'D SET UP A JOIN TO HET WITH MY WIFE AND WHERE THE AP MIGHT BE", "I I'D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I'M NOT SEEING THE OPTION TO DO IT ON THE APSO I CALLED IN TO GET SOME HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AND I'M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS", 'HOW DO I TURN A JOIN A COUNT'] +['I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT', "FODING HOW I'D SET UP A JOIN TO HET WITH MY WIFE AND WHERE THE AP MIGHT BE", "I I'D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I'M NOT SEEING THE OPTION TO DO IT ON THE AP SO I CALLED IN TO GET SOME HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AND I'M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS", 'HOW DO I THURN A JOIN A COUNT'] ``` For a larger dataset where the inputs are big (like in speech or vision), you will want to pass along a generator instead of a list that loads all the inputs in memory. See the [pipeline documentation](./main_classes/pipelines) for more information. diff --git a/docs/source/es/quicktour.mdx b/docs/source/es/quicktour.mdx index 9de9e9af4b6e55..408c3fa375a074 100644 --- a/docs/source/es/quicktour.mdx +++ b/docs/source/es/quicktour.mdx @@ -129,7 +129,7 @@ Extraigamos las matrices de onda cruda (raw waveform, en inglés) de las primera ```py >>> resultado = reconocedor_de_voz(dataset[:4]["audio"]) >>> print([d["text"] for d in resultado]) -['ahora buenas e a ver tengo un problema como vuestra aplicación resulta que que quiero hacer una transferencia bancaria a una cuenta conocida pero me da error la aplicación a ver que a ver que puede ser', 'la aplicación no cargue salda de mi nueva cuenta', 'hola tengo un problema con la aplicación no carga y y tampoco veo que carga el saldo de mi cuenta nueva dice que la aplicación está siendo reparada y ahora no puedo aceder a mi cuenta no necesito inmediatamente', 'ora buena la aplicación no se carga la viladad no carga el saldo de mi cuenta nueva dice que la villadenta siendo reparada y oro no puede hacer a mi cuenta'] +['ahora buenas eh a ver tengo un problema con vuestra aplicación resulta que que quiero hacer una transferencia bancaria a una cuenta conocida pero me da error la aplicación a ver que a ver que puede ser', 'la aplicación no cargue saldo de mi nueva cuenta', 'hola tengo un problema con la aplicación no carga y y tampoco veo que carga el saldo de mi cuenta nueva dice que la aplicación está siendo reparada y ahora no puedo acceder a mi cuenta no necesito inmediatamente', 'hora buena la aplicación no se carga la vileza no carga el saldo de mi cuenta nueva dice que la villadenta siendo reparada y oro no puedo hacer a mi cuenta'] ``` Para un dataset más grande, donde los inputs son de mayor tamaño (como en habla/audio o visión), querrás pasar un generador en lugar de una lista que carga todos los inputs en memoria. Ve la [documentación del pipeline](./main_classes/pipelines) para más información. From 56a55d3ce4dc6b1e0df1bb1415d2c62bc575fb7b Mon Sep 17 00:00:00 2001 From: Sylvain Gugger Date: Fri, 5 Aug 2022 13:24:53 -0400 Subject: [PATCH 028/539] Forgot one new_ for cache migration --- src/transformers/utils/hub.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py index 7fa4c0a151ace4..81fd4151653194 100644 --- a/src/transformers/utils/hub.py +++ b/src/transformers/utils/hub.py @@ -1514,7 +1514,7 @@ def move_cache(cache_dir=None, new_cache_dir=None, token=None): # Not a file from huggingface.co continue - repo = os.path.join(cache_dir, url_info["repo"]) + repo = os.path.join(new_cache_dir, url_info["repo"]) move_to_new_cache( file=os.path.join(cache_dir, file_info["file"]), repo=repo, From 38d656041b47a35e9d8d0294f5498ac68765c281 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Fri, 5 Aug 2022 19:27:19 +0200 Subject: [PATCH 029/539] disable Onnx test for google/long-t5-tglobal-base (#18454) Co-authored-by: ydshieh --- tests/onnx/test_onnx_v2.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/onnx/test_onnx_v2.py b/tests/onnx/test_onnx_v2.py index c15910734f3adb..cfc58dd335c30d 100644 --- a/tests/onnx/test_onnx_v2.py +++ b/tests/onnx/test_onnx_v2.py @@ -228,7 +228,9 @@ def test_values_override(self): ("blenderbot", "facebook/blenderbot-400M-distill"), ("bigbird-pegasus", "google/bigbird-pegasus-large-arxiv"), ("longt5", "google/long-t5-local-base"), - ("longt5", "google/long-t5-tglobal-base"), + # Disable for now as it causes fatal error `Floating point exception (core dumped)` and the subsequential tests are + # not run. + # ("longt5", "google/long-t5-tglobal-base"), } # TODO(lewtun): Include the same model types in `PYTORCH_EXPORT_MODELS` once TensorFlow has parity with the PyTorch model implementations. From b8c247b6d0097d221c3d6878b41fe48663b8700b Mon Sep 17 00:00:00 2001 From: Julien Chaumond Date: Fri, 5 Aug 2022 19:29:38 +0200 Subject: [PATCH 030/539] Typo reported by Joel Grus on TWTR (#18493) --- src/transformers/utils/hub.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py index 81fd4151653194..2488ab8f690865 100644 --- a/src/transformers/utils/hub.py +++ b/src/transformers/utils/hub.py @@ -1543,7 +1543,7 @@ def move_cache(cache_dir=None, new_cache_dir=None, token=None): ) else: logger.warn( - "The cache for model files in Transformers v4.22.0 has been udpated. Migrating your old cache. This is a " + "The cache for model files in Transformers v4.22.0 has been updated. Migrating your old cache. This is a " "one-time only operation. You can interrupt this and resume the migration later on by calling " "`transformers.utils.move_cache()`." ) From 8d1f9039d01f79bfef763dd349d9904c13550c9d Mon Sep 17 00:00:00 2001 From: Julien Chaumond Date: Sat, 6 Aug 2022 09:38:55 +0200 Subject: [PATCH 031/539] =?UTF-8?q?Just=20re-reading=20the=20whole=20doc?= =?UTF-8?q?=20every=20couple=20of=20months=20=F0=9F=98=AC=20(#18489)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Delete valohai.yaml * NLP => ML * typo * website supports https * datasets * 60k + modalities * unrelated link fixing for accelerate * Ok those links were actually broken * Fix link * Make `AutoTokenizer` auto-link * wording tweak * add at least one non-nlp task --- README.md | 8 +- docs/source/en/accelerate.mdx | 10 +- docs/source/en/model_sharing.mdx | 2 +- docs/source/en/perf_train_gpu_one.mdx | 2 +- docs/source/en/pipeline_tutorial.mdx | 2 +- docs/source/en/run_scripts.mdx | 2 +- docs/source/en/task_summary.mdx | 2 +- docs/source/es/accelerate.mdx | 10 +- docs/source/es/model_sharing.mdx | 2 +- docs/source/es/run_scripts.mdx | 2 +- docs/source/it/accelerate.mdx | 10 +- docs/source/it/model_sharing.mdx | 2 +- docs/source/it/run_scripts.mdx | 2 +- docs/source/pt/accelerate.mdx | 10 +- examples/legacy/pytorch-lightning/run_ner.sh | 2 +- .../legacy/token-classification/README.md | 2 +- examples/legacy/token-classification/run.sh | 2 +- examples/pytorch/README.md | 4 +- examples/tensorflow/README.md | 2 +- valohai.yaml | 91 ------------------- 20 files changed, 39 insertions(+), 130 deletions(-) delete mode 100644 valohai.yaml diff --git a/README.md b/README.md index 0cda209bdfc32c..46a4b07c14cd32 100644 --- a/README.md +++ b/README.md @@ -157,7 +157,7 @@ Here we get a list of objects detected in the image, with a box surrounding the You can learn more about the tasks supported by the `pipeline` API in [this tutorial](https://huggingface.co/docs/transformers/task_summary). -To download and use any of the pretrained models on your given task, all it takes is three lines of code. Here is the PyTorch version: +In addition to `pipeline`, to download and use any of the pretrained models on your given task, all it takes is three lines of code. Here is the PyTorch version: ```python >>> from transformers import AutoTokenizer, AutoModel @@ -181,7 +181,7 @@ And here is the equivalent code for TensorFlow: The tokenizer is responsible for all the preprocessing the pretrained model expects, and can be called directly on a single string (as in the above examples) or a list. It will output a dictionary that you can use in downstream code or simply directly pass to your model using the ** argument unpacking operator. -The model itself is a regular [Pytorch `nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) or a [TensorFlow `tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) (depending on your backend) which you can use normally. [This tutorial](https://huggingface.co/docs/transformers/training) explains how to integrate such a model into a classic PyTorch or TensorFlow training loop, or how to use our `Trainer` API to quickly fine-tune on a new dataset. +The model itself is a regular [Pytorch `nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) or a [TensorFlow `tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) (depending on your backend) which you can use as usual. [This tutorial](https://huggingface.co/docs/transformers/training) explains how to integrate such a model into a classic PyTorch or TensorFlow training loop, or how to use our `Trainer` API to quickly fine-tune on a new dataset. ## Why should I use transformers? @@ -194,7 +194,7 @@ The model itself is a regular [Pytorch `nn.Module`](https://pytorch.org/docs/sta 1. Lower compute costs, smaller carbon footprint: - Researchers can share trained models instead of always retraining. - Practitioners can reduce compute time and production costs. - - Dozens of architectures with over 20,000 pretrained models, some in more than 100 languages. + - Dozens of architectures with over 60,000 pretrained models across all modalities. 1. Choose the right framework for every part of a model's lifetime: - Train state-of-the-art models in 3 lines of code. @@ -209,7 +209,7 @@ The model itself is a regular [Pytorch `nn.Module`](https://pytorch.org/docs/sta ## Why shouldn't I use transformers? - This library is not a modular toolbox of building blocks for neural nets. The code in the model files is not refactored with additional abstractions on purpose, so that researchers can quickly iterate on each of the models without diving into additional abstractions/files. -- The training API is not intended to work on any model but is optimized to work with the models provided by the library. For generic machine learning loops, you should use another library. +- The training API is not intended to work on any model but is optimized to work with the models provided by the library. For generic machine learning loops, you should use another library (possibly, [Accelerate](https://huggingface.co/docs/accelerate)). - While we strive to present as many use cases as possible, the scripts in our [examples folder](https://github.com/huggingface/transformers/tree/main/examples) are just that: examples. It is expected that they won't work out-of-the box on your specific problem and that you will be required to change a few lines of code to adapt them to your needs. ## Installation diff --git a/docs/source/en/accelerate.mdx b/docs/source/en/accelerate.mdx index 58b6e6958fa2d6..c215758d47b6a3 100644 --- a/docs/source/en/accelerate.mdx +++ b/docs/source/en/accelerate.mdx @@ -12,7 +12,7 @@ specific language governing permissions and limitations under the License. # Distributed training with 🤗 Accelerate -As models get bigger, parallelism has emerged as a strategy for training larger models on limited hardware and accelerating training speed by several orders of magnitude. At Hugging Face, we created the [🤗 Accelerate](https://huggingface.co/docs/accelerate/index.html) library to help users easily train a 🤗 Transformers model on any type of distributed setup, whether it is multiple GPU's on one machine or multiple GPU's across several machines. In this tutorial, learn how to customize your native PyTorch training loop to enable training in a distributed environment. +As models get bigger, parallelism has emerged as a strategy for training larger models on limited hardware and accelerating training speed by several orders of magnitude. At Hugging Face, we created the [🤗 Accelerate](https://huggingface.co/docs/accelerate) library to help users easily train a 🤗 Transformers model on any type of distributed setup, whether it is multiple GPU's on one machine or multiple GPU's across several machines. In this tutorial, learn how to customize your native PyTorch training loop to enable training in a distributed environment. ## Setup @@ -22,7 +22,7 @@ Get started by installing 🤗 Accelerate: pip install accelerate ``` -Then import and create an [`Accelerator`](https://huggingface.co/docs/accelerate/accelerator.html#accelerate.Accelerator) object. `Accelerator` will automatically detect your type of distributed setup and initialize all the necessary components for training. You don't need to explicitly place your model on a device. +Then import and create an [`Accelerator`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator) object. `Accelerator` will automatically detect your type of distributed setup and initialize all the necessary components for training. You don't need to explicitly place your model on a device. ```py >>> from accelerate import Accelerator @@ -32,7 +32,7 @@ Then import and create an [`Accelerator`](https://huggingface.co/docs/accelerate ## Prepare to accelerate -The next step is to pass all the relevant training objects to the [`prepare`](https://huggingface.co/docs/accelerate/accelerator.html#accelerate.Accelerator.prepare) method. This includes your training and evaluation DataLoaders, a model and an optimizer: +The next step is to pass all the relevant training objects to the [`prepare`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.prepare) method. This includes your training and evaluation DataLoaders, a model and an optimizer: ```py >>> train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( @@ -42,7 +42,7 @@ The next step is to pass all the relevant training objects to the [`prepare`](ht ## Backward -The last addition is to replace the typical `loss.backward()` in your training loop with 🤗 Accelerate's [`backward`](https://huggingface.co/docs/accelerate/accelerator.html#accelerate.Accelerator.backward) method: +The last addition is to replace the typical `loss.backward()` in your training loop with 🤗 Accelerate's [`backward`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.backward) method: ```py >>> for epoch in range(num_epochs): @@ -129,4 +129,4 @@ accelerate launch train.py >>> notebook_launcher(training_function) ``` -For more information about 🤗 Accelerate and it's rich features, refer to the [documentation](https://huggingface.co/docs/accelerate/index.html). \ No newline at end of file +For more information about 🤗 Accelerate and it's rich features, refer to the [documentation](https://huggingface.co/docs/accelerate). \ No newline at end of file diff --git a/docs/source/en/model_sharing.mdx b/docs/source/en/model_sharing.mdx index 24da63348c8a83..e6bd7fc4a6afe2 100644 --- a/docs/source/en/model_sharing.mdx +++ b/docs/source/en/model_sharing.mdx @@ -225,4 +225,4 @@ To make sure users understand your model's capabilities, limitations, potential * Manually creating and uploading a `README.md` file. * Clicking on the **Edit model card** button in your model repository. -Take a look at the DistilBert [model card](https://huggingface.co/distilbert-base-uncased) for a good example of the type of information a model card should include. For more details about other options you can control in the `README.md` file such as a model's carbon footprint or widget examples, refer to the documentation [here](https://huggingface.co/docs/hub/model-repos). +Take a look at the DistilBert [model card](https://huggingface.co/distilbert-base-uncased) for a good example of the type of information a model card should include. For more details about other options you can control in the `README.md` file such as a model's carbon footprint or widget examples, refer to the documentation [here](https://huggingface.co/docs/hub/models-cards). diff --git a/docs/source/en/perf_train_gpu_one.mdx b/docs/source/en/perf_train_gpu_one.mdx index 0c130b41722388..ba5bcb456d2220 100644 --- a/docs/source/en/perf_train_gpu_one.mdx +++ b/docs/source/en/perf_train_gpu_one.mdx @@ -609,7 +609,7 @@ for step, batch in enumerate(dataloader, start=1): optimizer.zero_grad() ``` -First we wrap the dataset in a [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader). Then we can enable gradient checkpointing by calling the model's [`~PreTrainedModel.gradient_checkpointing_enable`] method. When we initialize the [`Accelerator`](https://huggingface.co/docs/accelerate/accelerator.html#accelerate.Accelerator) we can specifiy if we want to use mixed precision training and it will take care of it for us in the [`prepare`] call. During the [`prepare`](https://huggingface.co/docs/accelerate/accelerator.html#accelerate.Accelerator.prepare) call the dataloader will also be distributed across workers should we use multiple GPUs. We use the same 8-bit optimizer from the earlier experiments. +First we wrap the dataset in a [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader). Then we can enable gradient checkpointing by calling the model's [`~PreTrainedModel.gradient_checkpointing_enable`] method. When we initialize the [`Accelerator`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator) we can specifiy if we want to use mixed precision training and it will take care of it for us in the [`prepare`] call. During the [`prepare`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.prepare) call the dataloader will also be distributed across workers should we use multiple GPUs. We use the same 8-bit optimizer from the earlier experiments. Finally, we can write the main training loop. Note that the `backward` call is handled by 🤗 Accelerate. We can also see how gradient accumulation works: we normalize the loss so we get the average at the end of accumulation and once we have enough steps we run the optimization. Now the question is: does this use the same amount of memory as the previous steps? Let's check: diff --git a/docs/source/en/pipeline_tutorial.mdx b/docs/source/en/pipeline_tutorial.mdx index 274f97f0d6cc92..7929113209748d 100644 --- a/docs/source/en/pipeline_tutorial.mdx +++ b/docs/source/en/pipeline_tutorial.mdx @@ -67,7 +67,7 @@ Any additional parameters for your task can also be included in the [`pipeline`] ### Choose a model and tokenizer -The [`pipeline`] accepts any model from the [Model Hub](https://huggingface.co/models). There are tags on the Model Hub that allow you to filter for a model you'd like to use for your task. Once you've picked an appropriate model, load it with the corresponding `AutoModelFor` and [`AutoTokenizer'] class. For example, load the [`AutoModelForCausalLM`] class for a causal language modeling task: +The [`pipeline`] accepts any model from the [Model Hub](https://huggingface.co/models). There are tags on the Model Hub that allow you to filter for a model you'd like to use for your task. Once you've picked an appropriate model, load it with the corresponding `AutoModelFor` and [`AutoTokenizer`] class. For example, load the [`AutoModelForCausalLM`] class for a causal language modeling task: ```py >>> from transformers import AutoTokenizer, AutoModelForCausalLM diff --git a/docs/source/en/run_scripts.mdx b/docs/source/en/run_scripts.mdx index 368bd910efc762..58d6b8dd3e208c 100644 --- a/docs/source/en/run_scripts.mdx +++ b/docs/source/en/run_scripts.mdx @@ -187,7 +187,7 @@ python run_summarization.py \ ## Run a script with 🤗 Accelerate -🤗 [Accelerate](https://huggingface.co/docs/accelerate/index.html) is a PyTorch-only library that offers a unified method for training a model on several types of setups (CPU-only, multiple GPUs, TPUs) while maintaining complete visibility into the PyTorch training loop. Make sure you have 🤗 Accelerate installed if you don't already have it: +🤗 [Accelerate](https://huggingface.co/docs/accelerate) is a PyTorch-only library that offers a unified method for training a model on several types of setups (CPU-only, multiple GPUs, TPUs) while maintaining complete visibility into the PyTorch training loop. Make sure you have 🤗 Accelerate installed if you don't already have it: > Note: As Accelerate is rapidly developing, the git version of accelerate must be installed to run the scripts ```bash diff --git a/docs/source/en/task_summary.mdx b/docs/source/en/task_summary.mdx index 27781ccc0503f0..18c442ac2abb02 100644 --- a/docs/source/en/task_summary.mdx +++ b/docs/source/en/task_summary.mdx @@ -16,7 +16,7 @@ specific language governing permissions and limitations under the License. This page shows the most frequent use-cases when using the library. The models available allow for many different configurations and a great versatility in use-cases. The most simple ones are presented here, showcasing usage for -tasks such as question answering, sequence classification, named entity recognition and others. +tasks such as image classification, question answering, sequence classification, named entity recognition and others. These examples leverage auto-models, which are classes that will instantiate a model according to a given checkpoint, automatically selecting the correct model architecture. Please check the [`AutoModel`] documentation diff --git a/docs/source/es/accelerate.mdx b/docs/source/es/accelerate.mdx index 43482106dc223e..6065bc110a1d71 100644 --- a/docs/source/es/accelerate.mdx +++ b/docs/source/es/accelerate.mdx @@ -12,7 +12,7 @@ specific language governing permissions and limitations under the License. # Entrenamiento distribuido con 🤗 Accelerate -El paralelismo ha emergido como una estrategia para entrenar modelos grandes en hardware limitado e incrementar la velocidad de entrenamiento en varios órdenes de magnitud. En Hugging Face creamos la biblioteca [🤗 Accelerate](https://huggingface.co/docs/accelerate/index.html) para ayudar a los usuarios a entrenar modelos 🤗 Transformers en cualquier tipo de configuración distribuida, ya sea en una máquina con múltiples GPUs o en múltiples GPUs distribuidas entre muchas máquinas. En este tutorial aprenderás cómo personalizar tu bucle de entrenamiento de PyTorch nativo para poder entrenar en entornos distribuidos. +El paralelismo ha emergido como una estrategia para entrenar modelos grandes en hardware limitado e incrementar la velocidad de entrenamiento en varios órdenes de magnitud. En Hugging Face creamos la biblioteca [🤗 Accelerate](https://huggingface.co/docs/accelerate) para ayudar a los usuarios a entrenar modelos 🤗 Transformers en cualquier tipo de configuración distribuida, ya sea en una máquina con múltiples GPUs o en múltiples GPUs distribuidas entre muchas máquinas. En este tutorial aprenderás cómo personalizar tu bucle de entrenamiento de PyTorch nativo para poder entrenar en entornos distribuidos. ## Configuración @@ -22,7 +22,7 @@ Empecemos por instalar 🤗 Accelerate: pip install accelerate ``` -Luego, importamos y creamos un objeto [`Accelerator`](https://huggingface.co/docs/accelerate/accelerator.html#accelerate.Accelerator). `Accelerator` detectará automáticamente el tipo de configuración distribuida que tengas disponible e inicializará todos los componentes necesarios para el entrenamiento. No necesitas especificar el dispositivo en donde se debe colocar tu modelo. +Luego, importamos y creamos un objeto [`Accelerator`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator). `Accelerator` detectará automáticamente el tipo de configuración distribuida que tengas disponible e inicializará todos los componentes necesarios para el entrenamiento. No necesitas especificar el dispositivo en donde se debe colocar tu modelo. ```py >>> from accelerate import Accelerator @@ -32,7 +32,7 @@ Luego, importamos y creamos un objeto [`Accelerator`](https://huggingface.co/doc ## Prepárate para acelerar -Pasa todos los objetos relevantes para el entrenamiento al método [`prepare`](https://huggingface.co/docs/accelerate/accelerator.html#accelerate.Accelerator.prepare). Esto incluye los DataLoaders de entrenamiento y evaluación, un modelo y un optimizador: +Pasa todos los objetos relevantes para el entrenamiento al método [`prepare`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.prepare). Esto incluye los DataLoaders de entrenamiento y evaluación, un modelo y un optimizador: ```py >>> train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( @@ -42,7 +42,7 @@ Pasa todos los objetos relevantes para el entrenamiento al método [`prepare`](h ## Backward -Por último, reemplaza el típico `loss.backward()` en tu bucle de entrenamiento con el método [`backward`](https://huggingface.co/docs/accelerate/accelerator.html#accelerate.Accelerator.backward) de 🤗 Accelerate: +Por último, reemplaza el típico `loss.backward()` en tu bucle de entrenamiento con el método [`backward`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.backward) de 🤗 Accelerate: ```py >>> for epoch in range(num_epochs): @@ -129,4 +129,4 @@ accelerate launch train.py >>> notebook_launcher(training_function) ``` -Para obtener más información sobre 🤗 Accelerate y sus numerosas funciones, consulta la [documentación](https://huggingface.co/docs/accelerate/index.html). +Para obtener más información sobre 🤗 Accelerate y sus numerosas funciones, consulta la [documentación](https://huggingface.co/docs/accelerate). diff --git a/docs/source/es/model_sharing.mdx b/docs/source/es/model_sharing.mdx index 072b80ab398b85..cf3215dc86d742 100644 --- a/docs/source/es/model_sharing.mdx +++ b/docs/source/es/model_sharing.mdx @@ -216,4 +216,4 @@ Para asegurarnos que los usuarios entiendan las capacidades de tu modelo, sus li * Elaborando y subiendo manualmente el archivo`README.md`. * Dando click en el botón **Edit model card** dentro del repositorio. -Toma un momento para ver la [tarjeta de modelo](https://huggingface.co/distilbert-base-uncased) de DistilBert para que tengas un buen ejemplo del tipo de información que debería incluir. Consulta [la documentación](https://huggingface.co/docs/hub/model-repos) para más detalles acerca de otras opciones que puedes controlar dentro del archivo `README.md` como la huella de carbono del modelo o ejemplos de widgets. Consulta la documentación [aquí] (https://huggingface.co/docs/hub/model-repos). +Toma un momento para ver la [tarjeta de modelo](https://huggingface.co/distilbert-base-uncased) de DistilBert para que tengas un buen ejemplo del tipo de información que debería incluir. Consulta [la documentación](https://huggingface.co/docs/hub/models-cards) para más detalles acerca de otras opciones que puedes controlar dentro del archivo `README.md` como la huella de carbono del modelo o ejemplos de widgets. Consulta la documentación [aquí] (https://huggingface.co/docs/hub/models-cards). diff --git a/docs/source/es/run_scripts.mdx b/docs/source/es/run_scripts.mdx index 9c107408456f14..73dd1ba320c1f6 100644 --- a/docs/source/es/run_scripts.mdx +++ b/docs/source/es/run_scripts.mdx @@ -187,7 +187,7 @@ python run_summarization.py \ ## Ejecutar un script con 🤗 Accelerate -🤗 [Accelerate](https://huggingface.co/docs/accelerate/index.html) es una biblioteca exclusiva de PyTorch que ofrece un método unificado para entrenar un modelo en varios tipos de configuraciones (solo CPU, GPU múltiples, TPU) mientras mantiene una visibilidad completa en el ciclo de entrenamiento de PyTorch. Asegúrate de tener 🤗 Accelerate instalado si aún no lo tienes: +🤗 [Accelerate](https://huggingface.co/docs/accelerate) es una biblioteca exclusiva de PyTorch que ofrece un método unificado para entrenar un modelo en varios tipos de configuraciones (solo CPU, GPU múltiples, TPU) mientras mantiene una visibilidad completa en el ciclo de entrenamiento de PyTorch. Asegúrate de tener 🤗 Accelerate instalado si aún no lo tienes: > Nota: Como Accelerate se está desarrollando rápidamente, debes instalar la versión git de Accelerate para ejecutar los scripts ```bash diff --git a/docs/source/it/accelerate.mdx b/docs/source/it/accelerate.mdx index 75abf65c7fcd1f..20dc1a7ff90b53 100644 --- a/docs/source/it/accelerate.mdx +++ b/docs/source/it/accelerate.mdx @@ -12,7 +12,7 @@ specific language governing permissions and limitations under the License. # Allenamento distribuito con 🤗 Accelerate -La parallelizzazione è emersa come strategia per allenare modelli sempre più grandi su hardware limitato e accelerarne la velocità di allenamento di diversi ordini di magnitudine. In Hugging Face, abbiamo creato la libreria [🤗 Accelerate](https://huggingface.co/docs/accelerate/index.html) per aiutarti ad allenare in modo semplice un modello 🤗 Transformers su qualsiasi tipo di configurazione distribuita, sia che si tratti di più GPU su una sola macchina o di più GPU su più macchine. In questo tutorial, imparerai come personalizzare il training loop nativo di PyTorch per consentire l'addestramento in un ambiente distribuito. +La parallelizzazione è emersa come strategia per allenare modelli sempre più grandi su hardware limitato e accelerarne la velocità di allenamento di diversi ordini di magnitudine. In Hugging Face, abbiamo creato la libreria [🤗 Accelerate](https://huggingface.co/docs/accelerate) per aiutarti ad allenare in modo semplice un modello 🤗 Transformers su qualsiasi tipo di configurazione distribuita, sia che si tratti di più GPU su una sola macchina o di più GPU su più macchine. In questo tutorial, imparerai come personalizzare il training loop nativo di PyTorch per consentire l'addestramento in un ambiente distribuito. ## Configurazione @@ -22,7 +22,7 @@ Inizia installando 🤗 Accelerate: pip install accelerate ``` -Poi importa e crea un oggetto [`Accelerator`](https://huggingface.co/docs/accelerate/accelerator.html#accelerate.Accelerator). `Accelerator` rileverà automaticamente il tuo setup distribuito e inizializzerà tutte le componenti necessarie per l'allenamento. Non dovrai allocare esplicitamente il tuo modello su un device. +Poi importa e crea un oggetto [`Accelerator`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator). `Accelerator` rileverà automaticamente il tuo setup distribuito e inizializzerà tutte le componenti necessarie per l'allenamento. Non dovrai allocare esplicitamente il tuo modello su un device. ```py >>> from accelerate import Accelerator @@ -32,7 +32,7 @@ Poi importa e crea un oggetto [`Accelerator`](https://huggingface.co/docs/accele ## Preparati ad accelerare -Il prossimo passo è quello di passare tutti gli oggetti rilevanti per l'allenamento al metodo [`prepare`](https://huggingface.co/docs/accelerate/accelerator.html#accelerate.Accelerator.prepare). Questo include i tuoi DataLoaders per l'allenamento e per la valutazione, un modello e un ottimizzatore: +Il prossimo passo è quello di passare tutti gli oggetti rilevanti per l'allenamento al metodo [`prepare`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.prepare). Questo include i tuoi DataLoaders per l'allenamento e per la valutazione, un modello e un ottimizzatore: ```py >>> train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( @@ -42,7 +42,7 @@ Il prossimo passo è quello di passare tutti gli oggetti rilevanti per l'allenam ## Backward -Infine, sostituisci il tipico metodo `loss.backward()` nel tuo loop di allenamento con il metodo [`backward`](https://huggingface.co/docs/accelerate/accelerator.html#accelerate.Accelerator.backward) di 🤗 Accelerate: +Infine, sostituisci il tipico metodo `loss.backward()` nel tuo loop di allenamento con il metodo [`backward`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.backward) di 🤗 Accelerate: ```py >>> for epoch in range(num_epochs): @@ -129,4 +129,4 @@ La libreria 🤗 Accelerate può anche essere utilizzata in un notebook se stai >>> notebook_launcher(training_function) ``` -Per maggiori informazioni relative a 🤗 Accelerate e le sue numerose funzionalità, fai riferimento alla [documentazione](https://huggingface.co/docs/accelerate/index.html). \ No newline at end of file +Per maggiori informazioni relative a 🤗 Accelerate e le sue numerose funzionalità, fai riferimento alla [documentazione](https://huggingface.co/docs/accelerate). \ No newline at end of file diff --git a/docs/source/it/model_sharing.mdx b/docs/source/it/model_sharing.mdx index a60fe50b2ba578..87ba2b5b342140 100644 --- a/docs/source/it/model_sharing.mdx +++ b/docs/source/it/model_sharing.mdx @@ -231,4 +231,4 @@ Per assicurarti che chiunque possa comprendere le abilità, limitazioni, i poten * Creando manualmente e caricando un file `README.md`. * Premendo sul pulsante **Edit model card** nel repository del tuo modello. -Dai un'occhiata alla [scheda del modello](https://huggingface.co/distilbert-base-uncased) di DistilBert per avere un buon esempio del tipo di informazioni che una scheda di un modello deve includere. Per maggiori dettagli legati ad altre opzioni che puoi controllare nel file `README.md`, come l'impatto ambientale o widget di esempio, fai riferimento alla documentazione [qui](https://huggingface.co/docs/hub/model-repos). +Dai un'occhiata alla [scheda del modello](https://huggingface.co/distilbert-base-uncased) di DistilBert per avere un buon esempio del tipo di informazioni che una scheda di un modello deve includere. Per maggiori dettagli legati ad altre opzioni che puoi controllare nel file `README.md`, come l'impatto ambientale o widget di esempio, fai riferimento alla documentazione [qui](https://huggingface.co/docs/hub/models-cards). diff --git a/docs/source/it/run_scripts.mdx b/docs/source/it/run_scripts.mdx index 4e3f639efb9dbf..3ffd58a62830aa 100644 --- a/docs/source/it/run_scripts.mdx +++ b/docs/source/it/run_scripts.mdx @@ -187,7 +187,7 @@ python run_summarization.py \ ## Esegui uno script con 🤗 Accelerate -🤗 [Accelerate](https://huggingface.co/docs/accelerate/index.html) è una libreria compatibile solo con PyTorch che offre un metodo unificato per addestrare modelli su diverse tipologie di configurazioni (CPU, multiple GPU, TPU) mantenendo una completa visibilità rispetto al ciclo di training di PyTorch. Assicurati di aver effettuato l'installazione di 🤗 Accelerate, nel caso non lo avessi fatto: +🤗 [Accelerate](https://huggingface.co/docs/accelerate) è una libreria compatibile solo con PyTorch che offre un metodo unificato per addestrare modelli su diverse tipologie di configurazioni (CPU, multiple GPU, TPU) mantenendo una completa visibilità rispetto al ciclo di training di PyTorch. Assicurati di aver effettuato l'installazione di 🤗 Accelerate, nel caso non lo avessi fatto: > Nota: dato che Accelerate è in rapido sviluppo, è necessario installare la versione proveniente da git per eseguire gli script: ```bash diff --git a/docs/source/pt/accelerate.mdx b/docs/source/pt/accelerate.mdx index 0e2257faceff84..59dbd96a83b26a 100644 --- a/docs/source/pt/accelerate.mdx +++ b/docs/source/pt/accelerate.mdx @@ -13,7 +13,7 @@ specific language governing permissions and limitations under the License. # Treinamento distribuído com o 🤗 Accelerate O paralelismo surgiu como uma estratégia para treinar modelos grandes em hardware limitado e aumentar a velocidade -de treinamento em várias órdens de magnitude. Na Hugging Face criamos a biblioteca [🤗 Accelerate](https://huggingface.co/docs/accelerate/index.html) +de treinamento em várias órdens de magnitude. Na Hugging Face criamos a biblioteca [🤗 Accelerate](https://huggingface.co/docs/accelerate) para ajudar os usuários a treinar modelos 🤗 Transformers com qualquer configuração distribuída, seja em uma máquina com múltiplos GPUs ou em múltiplos GPUs distribuidos entre muitas máquinas. Neste tutorial, você irá aprender como personalizar seu laço de treinamento de PyTorch para poder treinar em ambientes distribuídos. @@ -26,7 +26,7 @@ De início, instale o 🤗 Accelerate: pip install accelerate ``` -Logo, devemos importar e criar um objeto [`Accelerator`](https://huggingface.co/docs/accelerate/accelerator.html#accelerate.Accelerator). +Logo, devemos importar e criar um objeto [`Accelerator`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator). O `Accelerator` detectará automáticamente a configuração distribuída disponível e inicializará todos os componentes necessários para o treinamento. Não há necessidade portanto de especificar o dispositivo onde deve colocar seu modelo. @@ -38,7 +38,7 @@ componentes necessários para o treinamento. Não há necessidade portanto de es ## Preparando a aceleração -Passe todos os objetos relevantes ao treinamento para o método [`prepare`](https://huggingface.co/docs/accelerate/accelerator.html#accelerate.Accelerator.prepare). +Passe todos os objetos relevantes ao treinamento para o método [`prepare`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.prepare). Isto inclui os DataLoaders de treino e evaluação, um modelo e um otimizador: ```py @@ -49,7 +49,7 @@ Isto inclui os DataLoaders de treino e evaluação, um modelo e um otimizador: ## Backward -Por último, substitua o `loss.backward()` padrão em seu laço de treinamento com o método [`backward`](https://huggingface.co/docs/accelerate/accelerator.html#accelerate.Accelerator.backward) do 🤗 Accelerate: +Por último, substitua o `loss.backward()` padrão em seu laço de treinamento com o método [`backward`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.backward) do 🤗 Accelerate: ```py >>> for epoch in range(num_epochs): @@ -138,4 +138,4 @@ Encapsule o código responsável pelo treinamento de uma função e passe-o ao ` >>> notebook_launcher(training_function) ``` -Para obter mais informações sobre o 🤗 Accelerate e suas numerosas funções, consulte a [documentación](https://huggingface.co/docs/accelerate/index.html). +Para obter mais informações sobre o 🤗 Accelerate e suas numerosas funções, consulte a [documentación](https://huggingface.co/docs/accelerate/index). diff --git a/examples/legacy/pytorch-lightning/run_ner.sh b/examples/legacy/pytorch-lightning/run_ner.sh index 2913473eb8cdef..a5b185aa960d09 100755 --- a/examples/legacy/pytorch-lightning/run_ner.sh +++ b/examples/legacy/pytorch-lightning/run_ner.sh @@ -5,7 +5,7 @@ pip install -r ../requirements.txt ## The relevant files are currently on a shared Google ## drive at https://drive.google.com/drive/folders/1kC0I2UGl2ltrluI9NqDjaQJGw5iliw_J -## Monitor for changes and eventually migrate to nlp dataset +## Monitor for changes and eventually migrate to use the `datasets` library curl -L 'https://drive.google.com/uc?export=download&id=1Jjhbal535VVz2ap4v4r_rN1UEHTdLK5P' \ | grep -v "^#" | cut -f 2,3 | tr '\t' ' ' > train.txt.tmp curl -L 'https://drive.google.com/uc?export=download&id=1ZfRcQThdtAR5PPRjIDtrVP7BtXSCUBbm' \ diff --git a/examples/legacy/token-classification/README.md b/examples/legacy/token-classification/README.md index cd9c1587032c54..c2fa6eec7282b2 100644 --- a/examples/legacy/token-classification/README.md +++ b/examples/legacy/token-classification/README.md @@ -291,4 +291,4 @@ On the test dataset the following results could be achieved: 05/29/2020 23:34:02 - INFO - __main__ - eval_f1 = 0.47440836543753434 ``` -WNUT’17 is a very difficult task. Current state-of-the-art results on this dataset can be found [here](http://nlpprogress.com/english/named_entity_recognition.html). +WNUT’17 is a very difficult task. Current state-of-the-art results on this dataset can be found [here](https://nlpprogress.com/english/named_entity_recognition.html). diff --git a/examples/legacy/token-classification/run.sh b/examples/legacy/token-classification/run.sh index f5cbf0d50e02ee..b5f1e5f83bc7ff 100755 --- a/examples/legacy/token-classification/run.sh +++ b/examples/legacy/token-classification/run.sh @@ -1,6 +1,6 @@ ## The relevant files are currently on a shared Google ## drive at https://drive.google.com/drive/folders/1kC0I2UGl2ltrluI9NqDjaQJGw5iliw_J -## Monitor for changes and eventually migrate to nlp dataset +## Monitor for changes and eventually migrate to use the `datasets` library curl -L 'https://drive.google.com/uc?export=download&id=1Jjhbal535VVz2ap4v4r_rN1UEHTdLK5P' \ | grep -v "^#" | cut -f 2,3 | tr '\t' ' ' > train.txt.tmp curl -L 'https://drive.google.com/uc?export=download&id=1ZfRcQThdtAR5PPRjIDtrVP7BtXSCUBbm' \ diff --git a/examples/pytorch/README.md b/examples/pytorch/README.md index 95d42bfc8b3812..442511ead93a7a 100644 --- a/examples/pytorch/README.md +++ b/examples/pytorch/README.md @@ -15,12 +15,12 @@ limitations under the License. # Examples -This folder contains actively maintained examples of use of 🤗 Transformers using the PyTorch backend, organized along NLP tasks. +This folder contains actively maintained examples of use of 🤗 Transformers using the PyTorch backend, organized by ML task. ## The Big Table of Tasks Here is the list of all our examples: -- with information on whether they are **built on top of `Trainer``** (if not, they still work, they might +- with information on whether they are **built on top of `Trainer`** (if not, they still work, they might just lack some features), - whether or not they have a version using the [🤗 Accelerate](https://github.com/huggingface/accelerate) library. - whether or not they leverage the [🤗 Datasets](https://github.com/huggingface/datasets) library. diff --git a/examples/tensorflow/README.md b/examples/tensorflow/README.md index 967a1a8b7869e4..7936e3d4650950 100644 --- a/examples/tensorflow/README.md +++ b/examples/tensorflow/README.md @@ -15,7 +15,7 @@ limitations under the License. # Examples -This folder contains actively maintained examples of use of 🤗 Transformers organized into different NLP tasks. All examples in this folder are **TensorFlow** examples, and are written using native Keras rather than classes like `TFTrainer`, which we now consider deprecated. If you've previously only used 🤗 Transformers via `TFTrainer`, we highly recommend taking a look at the new style - we think it's a big improvement! +This folder contains actively maintained examples of use of 🤗 Transformers organized into different ML tasks. All examples in this folder are **TensorFlow** examples, and are written using native Keras rather than classes like `TFTrainer`, which we now consider deprecated. If you've previously only used 🤗 Transformers via `TFTrainer`, we highly recommend taking a look at the new style - we think it's a big improvement! In addition, all scripts here now support the [🤗 Datasets](https://github.com/huggingface/datasets) library - you can grab entire datasets just by changing one command-line argument! diff --git a/valohai.yaml b/valohai.yaml deleted file mode 100644 index 14441e27d02d4e..00000000000000 --- a/valohai.yaml +++ /dev/null @@ -1,91 +0,0 @@ ---- - -- step: - name: Execute python examples/text-classification/run_glue.py - image: pytorch/pytorch:nightly-devel-cuda10.0-cudnn7 - command: - - python /valohai/repository/utils/download_glue_data.py --data_dir=/glue_data - - pip install -e . - - pip install -r examples/requirements.txt - - python examples/text-classification/run_glue.py --do_train --data_dir=/glue_data/{parameter-value:task_name} {parameters} - parameters: - - name: model_type - pass-as: --model_type={v} - type: string - default: bert - - name: model_name_or_path - pass-as: --model_name_or_path={v} - type: string - default: bert-base-uncased - - name: task_name - pass-as: --task_name={v} - type: string - default: MRPC - - name: max_seq_length - pass-as: --max_seq_length={v} - description: The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded. - type: integer - default: 128 - - name: per_gpu_train_batch_size - pass-as: --per_gpu_train_batch_size={v} - description: Batch size per GPU/CPU for training. - type: integer - default: 8 - - name: per_gpu_eval_batch_size - pass-as: --per_gpu_eval_batch_size={v} - description: Batch size per GPU/CPU for evaluation. - type: integer - default: 8 - - name: gradient_accumulation_steps - pass-as: --gradient_accumulation_steps={v} - description: Number of updates steps to accumulate before performing a backward/update pass. - type: integer - default: 1 - - name: learning_rate - pass-as: --learning_rate={v} - description: The initial learning rate for Adam. - type: float - default: 0.00005 - - name: adam_epsilon - pass-as: --adam_epsilon={v} - description: Epsilon for Adam optimizer. - type: float - default: 0.00000001 - - name: max_grad_norm - pass-as: --max_grad_norm={v} - description: Max gradient norm. - type: float - default: 1.0 - - name: num_train_epochs - pass-as: --num_train_epochs={v} - description: Total number of training epochs to perform. - type: integer - default: 3 - - name: max_steps - pass-as: --max_steps={v} - description: If > 0, set total number of training steps to perform. Override num_train_epochs. - type: integer - default: -1 - - name: warmup_steps - pass-as: --warmup_steps={v} - description: Linear warmup over warmup_steps. - type: integer - default: -1 - - name: logging_steps - pass-as: --logging_steps={v} - description: Log every X updates steps. - type: integer - default: 25 - - name: save_steps - pass-as: --save_steps={v} - description: Save checkpoint every X updates steps. - type: integer - default: -1 - - name: output_dir - pass-as: --output_dir={v} - type: string - default: /valohai/outputs - - name: evaluation_strategy - description: The evaluation strategy to use. - type: string - default: steps From 9129fd0377e4d46cb2d0ea28dc1eb91a15f65b77 Mon Sep 17 00:00:00 2001 From: Julien Chaumond Date: Sat, 6 Aug 2022 09:42:55 +0200 Subject: [PATCH 032/539] `transformers-cli login` => `huggingface-cli login` (#18490) * zero chance anyone's using that constant no? * `transformers-cli login` => `huggingface-cli login` * `transformers-cli repo create` => `huggingface-cli repo create` * `make style` --- .../flax/image-captioning/run_image_captioning_flax.py | 2 +- examples/flax/language-modeling/run_bart_dlm_flax.py | 2 +- examples/flax/language-modeling/run_clm_flax.py | 2 +- examples/flax/language-modeling/run_mlm_flax.py | 2 +- examples/flax/language-modeling/run_t5_mlm_flax.py | 2 +- examples/flax/question-answering/run_qa.py | 2 +- examples/flax/summarization/run_summarization_flax.py | 2 +- examples/flax/text-classification/run_flax_glue.py | 2 +- examples/flax/token-classification/run_flax_ner.py | 2 +- examples/flax/vision/run_image_classification.py | 2 +- .../audio-classification/run_audio_classification.py | 2 +- examples/pytorch/contrastive-image-text/run_clip.py | 2 +- .../image-classification/run_image_classification.py | 2 +- examples/pytorch/image-pretraining/run_mae.py | 2 +- examples/pytorch/image-pretraining/run_mim.py | 2 +- examples/pytorch/language-modeling/run_clm.py | 2 +- examples/pytorch/language-modeling/run_mlm.py | 2 +- examples/pytorch/language-modeling/run_plm.py | 2 +- examples/pytorch/multiple-choice/run_swag.py | 2 +- examples/pytorch/question-answering/run_qa.py | 2 +- .../pytorch/question-answering/run_qa_beam_search.py | 2 +- examples/pytorch/question-answering/run_seq2seq_qa.py | 2 +- .../semantic-segmentation/run_semantic_segmentation.py | 2 +- .../speech-recognition/run_speech_recognition_ctc.py | 2 +- .../run_speech_recognition_seq2seq.py | 2 +- examples/pytorch/summarization/run_summarization.py | 2 +- examples/pytorch/text-classification/run_glue.py | 2 +- examples/pytorch/text-classification/run_xnli.py | 2 +- examples/pytorch/token-classification/run_ner.py | 2 +- examples/pytorch/translation/run_translation.py | 2 +- .../research_projects/layoutlmv3/run_funsd_cord.py | 2 +- examples/research_projects/mlm_wwm/run_mlm_wwm.py | 2 +- .../quantization-qdqbert/run_quant_qa.py | 2 +- .../run_speech_recognition_ctc_bnb.py | 2 +- .../run_speech_recognition_ctc_streaming.py | 2 +- .../research_projects/tapex/run_tabfact_with_tapex.py | 2 +- .../research_projects/tapex/run_wikisql_with_tapex.py | 2 +- .../tapex/run_wikitablequestions_with_tapex.py | 2 +- examples/research_projects/xtreme-s/run_xtreme_s.py | 2 +- examples/tensorflow/language-modeling/run_clm.py | 2 +- examples/tensorflow/language-modeling/run_mlm.py | 2 +- examples/tensorflow/multiple-choice/run_swag.py | 2 +- examples/tensorflow/question-answering/run_qa.py | 2 +- examples/tensorflow/summarization/run_summarization.py | 2 +- examples/tensorflow/text-classification/run_glue.py | 2 +- .../text-classification/run_text_classification.py | 2 +- examples/tensorflow/token-classification/run_ner.py | 2 +- examples/tensorflow/translation/run_translation.py | 2 +- scripts/tatoeba/README.md | 2 +- scripts/tatoeba/upload_models.sh | 2 +- src/transformers/commands/user.py | 5 +---- src/transformers/configuration_utils.py | 2 +- src/transformers/dynamic_module_utils.py | 4 ++-- src/transformers/feature_extraction_utils.py | 2 +- src/transformers/modeling_tf_utils.py | 6 +++--- src/transformers/modeling_utils.py | 2 +- .../models/auto/feature_extraction_auto.py | 4 ++-- src/transformers/models/auto/processing_auto.py | 2 +- src/transformers/models/auto/tokenization_auto.py | 2 +- src/transformers/pipelines/__init__.py | 2 +- src/transformers/tokenization_utils_base.py | 2 +- src/transformers/utils/hub.py | 10 +++++----- .../run_{{cookiecutter.example_shortcut}}.py | 2 +- .../scripts/pytorch/run_glue_model_parallelism.py | 2 +- 64 files changed, 72 insertions(+), 75 deletions(-) diff --git a/examples/flax/image-captioning/run_image_captioning_flax.py b/examples/flax/image-captioning/run_image_captioning_flax.py index 4552defb8efc45..348a719857830a 100644 --- a/examples/flax/image-captioning/run_image_captioning_flax.py +++ b/examples/flax/image-captioning/run_image_captioning_flax.py @@ -186,7 +186,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/flax/language-modeling/run_bart_dlm_flax.py b/examples/flax/language-modeling/run_bart_dlm_flax.py index 5c8bf1bbc45dda..6396f4ced99695 100644 --- a/examples/flax/language-modeling/run_bart_dlm_flax.py +++ b/examples/flax/language-modeling/run_bart_dlm_flax.py @@ -172,7 +172,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/flax/language-modeling/run_clm_flax.py b/examples/flax/language-modeling/run_clm_flax.py index 5fe786da7cc5ad..1a0428fdd67039 100755 --- a/examples/flax/language-modeling/run_clm_flax.py +++ b/examples/flax/language-modeling/run_clm_flax.py @@ -173,7 +173,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/flax/language-modeling/run_mlm_flax.py b/examples/flax/language-modeling/run_mlm_flax.py index f3f3c324ecfea6..65f6a2285d9c34 100755 --- a/examples/flax/language-modeling/run_mlm_flax.py +++ b/examples/flax/language-modeling/run_mlm_flax.py @@ -172,7 +172,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/flax/language-modeling/run_t5_mlm_flax.py b/examples/flax/language-modeling/run_t5_mlm_flax.py index a2906c410879b9..0030fc8da66a57 100755 --- a/examples/flax/language-modeling/run_t5_mlm_flax.py +++ b/examples/flax/language-modeling/run_t5_mlm_flax.py @@ -172,7 +172,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/flax/question-answering/run_qa.py b/examples/flax/question-answering/run_qa.py index 0873b19413bfea..1b951e35839816 100644 --- a/examples/flax/question-answering/run_qa.py +++ b/examples/flax/question-answering/run_qa.py @@ -159,7 +159,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/flax/summarization/run_summarization_flax.py b/examples/flax/summarization/run_summarization_flax.py index 856fd6fdb7b36a..c193fe0bc3745a 100644 --- a/examples/flax/summarization/run_summarization_flax.py +++ b/examples/flax/summarization/run_summarization_flax.py @@ -186,7 +186,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/flax/text-classification/run_flax_glue.py b/examples/flax/text-classification/run_flax_glue.py index 7f5524dbb437d6..e0dfab2f52e994 100755 --- a/examples/flax/text-classification/run_flax_glue.py +++ b/examples/flax/text-classification/run_flax_glue.py @@ -105,7 +105,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/flax/token-classification/run_flax_ner.py b/examples/flax/token-classification/run_flax_ner.py index 0a66b5f1990bc9..ad68c0997fed81 100644 --- a/examples/flax/token-classification/run_flax_ner.py +++ b/examples/flax/token-classification/run_flax_ner.py @@ -153,7 +153,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/flax/vision/run_image_classification.py b/examples/flax/vision/run_image_classification.py index 305dd3ac205f0c..3de3c977ab1d46 100644 --- a/examples/flax/vision/run_image_classification.py +++ b/examples/flax/vision/run_image_classification.py @@ -162,7 +162,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/pytorch/audio-classification/run_audio_classification.py b/examples/pytorch/audio-classification/run_audio_classification.py index 6c2a6cb8803976..9ebd4fb00759f5 100644 --- a/examples/pytorch/audio-classification/run_audio_classification.py +++ b/examples/pytorch/audio-classification/run_audio_classification.py @@ -156,7 +156,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/pytorch/contrastive-image-text/run_clip.py b/examples/pytorch/contrastive-image-text/run_clip.py index 22b420d856173c..d3c5355f9d07cf 100644 --- a/examples/pytorch/contrastive-image-text/run_clip.py +++ b/examples/pytorch/contrastive-image-text/run_clip.py @@ -90,7 +90,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/pytorch/image-classification/run_image_classification.py b/examples/pytorch/image-classification/run_image_classification.py index f8c2c95f59592e..2d26e42604da03 100644 --- a/examples/pytorch/image-classification/run_image_classification.py +++ b/examples/pytorch/image-classification/run_image_classification.py @@ -145,7 +145,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/pytorch/image-pretraining/run_mae.py b/examples/pytorch/image-pretraining/run_mae.py index aa8de32d8cb2ed..3ac4106b11acbf 100644 --- a/examples/pytorch/image-pretraining/run_mae.py +++ b/examples/pytorch/image-pretraining/run_mae.py @@ -137,7 +137,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/pytorch/image-pretraining/run_mim.py b/examples/pytorch/image-pretraining/run_mim.py index f60b21600832e2..7626e8be363253 100644 --- a/examples/pytorch/image-pretraining/run_mim.py +++ b/examples/pytorch/image-pretraining/run_mim.py @@ -157,7 +157,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/pytorch/language-modeling/run_clm.py b/examples/pytorch/language-modeling/run_clm.py index 53052d7671e061..ca992c04562e5e 100755 --- a/examples/pytorch/language-modeling/run_clm.py +++ b/examples/pytorch/language-modeling/run_clm.py @@ -114,7 +114,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/pytorch/language-modeling/run_mlm.py b/examples/pytorch/language-modeling/run_mlm.py index dcc8bcd3cd955d..b635a7aea69881 100755 --- a/examples/pytorch/language-modeling/run_mlm.py +++ b/examples/pytorch/language-modeling/run_mlm.py @@ -111,7 +111,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/pytorch/language-modeling/run_plm.py b/examples/pytorch/language-modeling/run_plm.py index 15ff8eb45f6ebb..4a885ee49661fd 100755 --- a/examples/pytorch/language-modeling/run_plm.py +++ b/examples/pytorch/language-modeling/run_plm.py @@ -99,7 +99,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/pytorch/multiple-choice/run_swag.py b/examples/pytorch/multiple-choice/run_swag.py index 5771165cafeb8f..f9df919e1f92da 100755 --- a/examples/pytorch/multiple-choice/run_swag.py +++ b/examples/pytorch/multiple-choice/run_swag.py @@ -83,7 +83,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/pytorch/question-answering/run_qa.py b/examples/pytorch/question-answering/run_qa.py index cddcb4891beff6..54db2b7bb12d66 100755 --- a/examples/pytorch/question-answering/run_qa.py +++ b/examples/pytorch/question-answering/run_qa.py @@ -83,7 +83,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/pytorch/question-answering/run_qa_beam_search.py b/examples/pytorch/question-answering/run_qa_beam_search.py index 1c389e43f37759..ce110ae3646362 100755 --- a/examples/pytorch/question-answering/run_qa_beam_search.py +++ b/examples/pytorch/question-answering/run_qa_beam_search.py @@ -82,7 +82,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/pytorch/question-answering/run_seq2seq_qa.py b/examples/pytorch/question-answering/run_seq2seq_qa.py index c3c85b31da2f0d..8ffe114dbb8644 100644 --- a/examples/pytorch/question-answering/run_seq2seq_qa.py +++ b/examples/pytorch/question-answering/run_seq2seq_qa.py @@ -83,7 +83,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py b/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py index 92d07f8f9199bc..bc1bfb2c1c0945 100644 --- a/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py +++ b/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py @@ -246,7 +246,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py b/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py index fdbed2b3ab09a4..36efb44138d9a6 100755 --- a/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py +++ b/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py @@ -233,7 +233,7 @@ class DataTrainingArguments: metadata={ "help": ( "If :obj:`True`, will use the token generated when running" - ":obj:`transformers-cli login` as HTTP bearer authorization for remote files." + ":obj:`huggingface-cli login` as HTTP bearer authorization for remote files." ) }, ) diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py b/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py index 0ce8ff05508ea9..015c1f0a653222 100755 --- a/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py +++ b/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py @@ -89,7 +89,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/pytorch/summarization/run_summarization.py b/examples/pytorch/summarization/run_summarization.py index 78d5b79ca4274a..5d6d5d5c771b3a 100755 --- a/examples/pytorch/summarization/run_summarization.py +++ b/examples/pytorch/summarization/run_summarization.py @@ -103,7 +103,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/pytorch/text-classification/run_glue.py b/examples/pytorch/text-classification/run_glue.py index 556e1f3bbe66ef..49af0c85568c9b 100755 --- a/examples/pytorch/text-classification/run_glue.py +++ b/examples/pytorch/text-classification/run_glue.py @@ -192,7 +192,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/pytorch/text-classification/run_xnli.py b/examples/pytorch/text-classification/run_xnli.py index 2450d24e3d0169..d4cfc3a77d0b6d 100755 --- a/examples/pytorch/text-classification/run_xnli.py +++ b/examples/pytorch/text-classification/run_xnli.py @@ -156,7 +156,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/pytorch/token-classification/run_ner.py b/examples/pytorch/token-classification/run_ner.py index 13993e58a450f1..9000b5006e03fa 100755 --- a/examples/pytorch/token-classification/run_ner.py +++ b/examples/pytorch/token-classification/run_ner.py @@ -83,7 +83,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/pytorch/translation/run_translation.py b/examples/pytorch/translation/run_translation.py index a519fa17533591..af1868b25aad35 100755 --- a/examples/pytorch/translation/run_translation.py +++ b/examples/pytorch/translation/run_translation.py @@ -93,7 +93,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/research_projects/layoutlmv3/run_funsd_cord.py b/examples/research_projects/layoutlmv3/run_funsd_cord.py index 66be61dffccf20..866f9a9c1b1163 100644 --- a/examples/research_projects/layoutlmv3/run_funsd_cord.py +++ b/examples/research_projects/layoutlmv3/run_funsd_cord.py @@ -81,7 +81,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/research_projects/mlm_wwm/run_mlm_wwm.py b/examples/research_projects/mlm_wwm/run_mlm_wwm.py index 0afa4135537a85..f14ad5adfeff16 100644 --- a/examples/research_projects/mlm_wwm/run_mlm_wwm.py +++ b/examples/research_projects/mlm_wwm/run_mlm_wwm.py @@ -101,7 +101,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/research_projects/quantization-qdqbert/run_quant_qa.py b/examples/research_projects/quantization-qdqbert/run_quant_qa.py index 97eece4c1d0ac9..5008197b8b845d 100755 --- a/examples/research_projects/quantization-qdqbert/run_quant_qa.py +++ b/examples/research_projects/quantization-qdqbert/run_quant_qa.py @@ -84,7 +84,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/research_projects/robust-speech-event/run_speech_recognition_ctc_bnb.py b/examples/research_projects/robust-speech-event/run_speech_recognition_ctc_bnb.py index afa3397eb43000..5294e6a4a9aef9 100755 --- a/examples/research_projects/robust-speech-event/run_speech_recognition_ctc_bnb.py +++ b/examples/research_projects/robust-speech-event/run_speech_recognition_ctc_bnb.py @@ -231,7 +231,7 @@ class DataTrainingArguments: metadata={ "help": ( "If :obj:`True`, will use the token generated when running" - ":obj:`transformers-cli login` as HTTP bearer authorization for remote files." + ":obj:`huggingface-cli login` as HTTP bearer authorization for remote files." ) }, ) diff --git a/examples/research_projects/robust-speech-event/run_speech_recognition_ctc_streaming.py b/examples/research_projects/robust-speech-event/run_speech_recognition_ctc_streaming.py index 57f54048a52330..8add8fd20a72d9 100644 --- a/examples/research_projects/robust-speech-event/run_speech_recognition_ctc_streaming.py +++ b/examples/research_projects/robust-speech-event/run_speech_recognition_ctc_streaming.py @@ -234,7 +234,7 @@ class DataTrainingArguments: metadata={ "help": ( "If :obj:`True`, will use the token generated when running" - ":obj:`transformers-cli login` as HTTP bearer authorization for remote files." + ":obj:`huggingface-cli login` as HTTP bearer authorization for remote files." ) }, ) diff --git a/examples/research_projects/tapex/run_tabfact_with_tapex.py b/examples/research_projects/tapex/run_tabfact_with_tapex.py index 19c21c33948edb..23d094f8992a63 100644 --- a/examples/research_projects/tapex/run_tabfact_with_tapex.py +++ b/examples/research_projects/tapex/run_tabfact_with_tapex.py @@ -175,7 +175,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/research_projects/tapex/run_wikisql_with_tapex.py b/examples/research_projects/tapex/run_wikisql_with_tapex.py index 7573893629c6d6..1d402fa7e8f0e9 100644 --- a/examples/research_projects/tapex/run_wikisql_with_tapex.py +++ b/examples/research_projects/tapex/run_wikisql_with_tapex.py @@ -104,7 +104,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/research_projects/tapex/run_wikitablequestions_with_tapex.py b/examples/research_projects/tapex/run_wikitablequestions_with_tapex.py index 7ffa8f5f91cc43..6f93f9b5166929 100644 --- a/examples/research_projects/tapex/run_wikitablequestions_with_tapex.py +++ b/examples/research_projects/tapex/run_wikitablequestions_with_tapex.py @@ -102,7 +102,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/research_projects/xtreme-s/run_xtreme_s.py b/examples/research_projects/xtreme-s/run_xtreme_s.py index d3e4f5cb38abf9..16fc1ac8a39c32 100644 --- a/examples/research_projects/xtreme-s/run_xtreme_s.py +++ b/examples/research_projects/xtreme-s/run_xtreme_s.py @@ -287,7 +287,7 @@ class DataTrainingArguments: metadata={ "help": ( "If :obj:`True`, will use the token generated when running" - ":obj:`transformers-cli login` as HTTP bearer authorization for remote files." + ":obj:`huggingface-cli login` as HTTP bearer authorization for remote files." ) }, ) diff --git a/examples/tensorflow/language-modeling/run_clm.py b/examples/tensorflow/language-modeling/run_clm.py index 46c8d339d970c3..3f12683d10d997 100755 --- a/examples/tensorflow/language-modeling/run_clm.py +++ b/examples/tensorflow/language-modeling/run_clm.py @@ -114,7 +114,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/tensorflow/language-modeling/run_mlm.py b/examples/tensorflow/language-modeling/run_mlm.py index 46b27dab662519..b421ed8e669c15 100755 --- a/examples/tensorflow/language-modeling/run_mlm.py +++ b/examples/tensorflow/language-modeling/run_mlm.py @@ -115,7 +115,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/tensorflow/multiple-choice/run_swag.py b/examples/tensorflow/multiple-choice/run_swag.py index b09b0e5598f514..6ba35bd0fd2023 100644 --- a/examples/tensorflow/multiple-choice/run_swag.py +++ b/examples/tensorflow/multiple-choice/run_swag.py @@ -157,7 +157,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/tensorflow/question-answering/run_qa.py b/examples/tensorflow/question-answering/run_qa.py index bd233f378a4dc9..91293aefb35f55 100755 --- a/examples/tensorflow/question-answering/run_qa.py +++ b/examples/tensorflow/question-answering/run_qa.py @@ -80,7 +80,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/tensorflow/summarization/run_summarization.py b/examples/tensorflow/summarization/run_summarization.py index 5d0737fdeffbb9..6d4cf99e6782f8 100644 --- a/examples/tensorflow/summarization/run_summarization.py +++ b/examples/tensorflow/summarization/run_summarization.py @@ -101,7 +101,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/tensorflow/text-classification/run_glue.py b/examples/tensorflow/text-classification/run_glue.py index fe7ef66ece129c..9fb0b3f8e43482 100644 --- a/examples/tensorflow/text-classification/run_glue.py +++ b/examples/tensorflow/text-classification/run_glue.py @@ -183,7 +183,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/tensorflow/text-classification/run_text_classification.py b/examples/tensorflow/text-classification/run_text_classification.py index 210a30344dbc0e..b5d19032971c5b 100644 --- a/examples/tensorflow/text-classification/run_text_classification.py +++ b/examples/tensorflow/text-classification/run_text_classification.py @@ -173,7 +173,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/tensorflow/token-classification/run_ner.py b/examples/tensorflow/token-classification/run_ner.py index cd4eea6feeb6dc..caa47e115a4bfa 100644 --- a/examples/tensorflow/token-classification/run_ner.py +++ b/examples/tensorflow/token-classification/run_ner.py @@ -83,7 +83,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/examples/tensorflow/translation/run_translation.py b/examples/tensorflow/translation/run_translation.py index 6e12288fd44f52..7f5eb9eb9defb7 100644 --- a/examples/tensorflow/translation/run_translation.py +++ b/examples/tensorflow/translation/run_translation.py @@ -95,7 +95,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, diff --git a/scripts/tatoeba/README.md b/scripts/tatoeba/README.md index b86caf51d725b0..7c492ec4f46e2e 100644 --- a/scripts/tatoeba/README.md +++ b/scripts/tatoeba/README.md @@ -57,7 +57,7 @@ To upload all converted models, 2. Login to `transformers-cli` ```bash -transformers-cli login +huggingface-cli login ``` 3. Run the `upload_models` script diff --git a/scripts/tatoeba/upload_models.sh b/scripts/tatoeba/upload_models.sh index 07c21edcbd519e..536eb5bc68c4c4 100755 --- a/scripts/tatoeba/upload_models.sh +++ b/scripts/tatoeba/upload_models.sh @@ -2,7 +2,7 @@ for FILE in converted/*; do model_name=`basename $FILE` - transformers-cli repo create $model_name -y + huggingface-cli repo create $model_name -y git clone https://huggingface.co/Helsinki-NLP/$model_name mv $FILE/* $model_name/ cd $model_name diff --git a/src/transformers/commands/user.py b/src/transformers/commands/user.py index 2f073235d25648..938f4c8ea8b616 100644 --- a/src/transformers/commands/user.py +++ b/src/transformers/commands/user.py @@ -22,9 +22,6 @@ from . import BaseTransformersCLICommand -UPLOAD_MAX_FILES = 15 - - class UserCommands(BaseTransformersCLICommand): @staticmethod def register_subcommand(parser: ArgumentParser): @@ -105,7 +102,7 @@ class LoginCommand(BaseUserCommand): def run(self): print( ANSI.red( - "ERROR! `transformers-cli login` uses an outdated login mechanism " + "ERROR! `huggingface-cli login` uses an outdated login mechanism " "that is not compatible with the Hugging Face Hub backend anymore. " "Please use `huggingface-cli login instead." ) diff --git a/src/transformers/configuration_utils.py b/src/transformers/configuration_utils.py index b10475127b4fce..b924cec9ae021c 100755 --- a/src/transformers/configuration_utils.py +++ b/src/transformers/configuration_utils.py @@ -463,7 +463,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated - when running `transformers-cli login` (stored in `~/.huggingface`). + when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any diff --git a/src/transformers/dynamic_module_utils.py b/src/transformers/dynamic_module_utils.py index 7baafd214c2558..da1434067cbdf8 100644 --- a/src/transformers/dynamic_module_utils.py +++ b/src/transformers/dynamic_module_utils.py @@ -195,7 +195,7 @@ def get_cached_module_file( 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated - when running `transformers-cli login` (stored in `~/.huggingface`). + when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any @@ -345,7 +345,7 @@ def get_class_from_dynamic_module( 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. use_auth_token (`str` or `bool`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated - when running `transformers-cli login` (stored in `~/.huggingface`). + when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any diff --git a/src/transformers/feature_extraction_utils.py b/src/transformers/feature_extraction_utils.py index ec68f355191c1d..394d67a8c5a1a7 100644 --- a/src/transformers/feature_extraction_utils.py +++ b/src/transformers/feature_extraction_utils.py @@ -251,7 +251,7 @@ def from_pretrained( 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated - when running `transformers-cli login` (stored in `~/.huggingface`). + when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any diff --git a/src/transformers/modeling_tf_utils.py b/src/transformers/modeling_tf_utils.py index 1a63d32e4196a0..354bd9592f30cd 100644 --- a/src/transformers/modeling_tf_utils.py +++ b/src/transformers/modeling_tf_utils.py @@ -2096,7 +2096,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): Whether or not to only look at local files (e.g., not try doanloading the model). use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated - when running `transformers-cli login` (stored in `~/.huggingface`). + when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any @@ -2472,8 +2472,8 @@ def push_to_hub( Whether or not the repository created should be private (requires a paying subscription). use_auth_token (`bool` or `str`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated - when running `transformers-cli login` (stored in `~/.huggingface`). Will default to `True` if - `repo_url` is not specified. + when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url` + is not specified. max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): Only applicable for models. The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. If expressed as a string, needs to be digits followed diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index 8709ec66365c66..771f1d2d5d84c0 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -1659,7 +1659,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P Whether or not to only look at local files (i.e., do not try to download the model). use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated - when running `transformers-cli login` (stored in `~/.huggingface`). + when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any diff --git a/src/transformers/models/auto/feature_extraction_auto.py b/src/transformers/models/auto/feature_extraction_auto.py index ed526369df4f38..db581d03d8fb7e 100644 --- a/src/transformers/models/auto/feature_extraction_auto.py +++ b/src/transformers/models/auto/feature_extraction_auto.py @@ -142,7 +142,7 @@ def get_feature_extractor_config( 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated - when running `transformers-cli login` (stored in `~/.huggingface`). + when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any @@ -247,7 +247,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated - when running `transformers-cli login` (stored in `~/.huggingface`). + when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any diff --git a/src/transformers/models/auto/processing_auto.py b/src/transformers/models/auto/processing_auto.py index d81dd19ea23dde..aed7b4b9761373 100644 --- a/src/transformers/models/auto/processing_auto.py +++ b/src/transformers/models/auto/processing_auto.py @@ -135,7 +135,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated - when running `transformers-cli login` (stored in `~/.huggingface`). + when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any diff --git a/src/transformers/models/auto/tokenization_auto.py b/src/transformers/models/auto/tokenization_auto.py index 7a2dc2941fdd09..d8759fd4e7842e 100644 --- a/src/transformers/models/auto/tokenization_auto.py +++ b/src/transformers/models/auto/tokenization_auto.py @@ -357,7 +357,7 @@ def get_tokenizer_config( 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated - when running `transformers-cli login` (stored in `~/.huggingface`). + when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any diff --git a/src/transformers/pipelines/__init__.py b/src/transformers/pipelines/__init__.py index d2a4b663801d78..104726bbd8cc7a 100755 --- a/src/transformers/pipelines/__init__.py +++ b/src/transformers/pipelines/__init__.py @@ -505,7 +505,7 @@ def pipeline( Whether or not to use a Fast tokenizer if possible (a [`PreTrainedTokenizerFast`]). use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated - when running `transformers-cli login` (stored in `~/.huggingface`). + when running `huggingface-cli login` (stored in `~/.huggingface`). device_map (`str` or `Dict[str, Union[int, str, torch.device]`, *optional*): Sent directly as `model_kwargs` (just a simpler shortcut). When `accelerate` library is present, set `device_map="auto"` to compute the most optimized `device_map` automatically. [More diff --git a/src/transformers/tokenization_utils_base.py b/src/transformers/tokenization_utils_base.py index fc1c0ff8da3b32..91537ef46cc864 100644 --- a/src/transformers/tokenization_utils_base.py +++ b/src/transformers/tokenization_utils_base.py @@ -1596,7 +1596,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated - when running `transformers-cli login` (stored in `~/.huggingface`). + when running `huggingface-cli login` (stored in `~/.huggingface`). local_files_only (`bool`, *optional*, defaults to `False`): Whether or not to only rely on local files and not to attempt to download any files. revision (`str`, *optional*, defaults to `"main"`): diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py index 2488ab8f690865..1aa086da6721ec 100644 --- a/src/transformers/utils/hub.py +++ b/src/transformers/utils/hub.py @@ -716,7 +716,7 @@ def cached_file( 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated - when running `transformers-cli login` (stored in `~/.huggingface`). + when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any @@ -870,7 +870,7 @@ def get_file_from_repo( 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated - when running `transformers-cli login` (stored in `~/.huggingface`). + when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any @@ -983,7 +983,7 @@ def get_list_of_files( identifier allowed by git. use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated - when running `transformers-cli login` (stored in `~/.huggingface`). + when running `huggingface-cli login` (stored in `~/.huggingface`). local_files_only (`bool`, *optional*, defaults to `False`): Whether or not to only rely on local files and not to attempt to download any files. @@ -1161,8 +1161,8 @@ def push_to_hub( Whether or not the repository created should be private (requires a paying subscription). use_auth_token (`bool` or `str`, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated - when running `transformers-cli login` (stored in `~/.huggingface`). Will default to `True` if - `repo_url` is not specified. + when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url` + is not specified. max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): Only applicable for models. The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size lower than this size. If expressed as a string, needs to be digits followed diff --git a/templates/adding_a_new_example_script/{{cookiecutter.directory_name}}/run_{{cookiecutter.example_shortcut}}.py b/templates/adding_a_new_example_script/{{cookiecutter.directory_name}}/run_{{cookiecutter.example_shortcut}}.py index f07029ec242caa..e7a622edd71527 100755 --- a/templates/adding_a_new_example_script/{{cookiecutter.directory_name}}/run_{{cookiecutter.example_shortcut}}.py +++ b/templates/adding_a_new_example_script/{{cookiecutter.directory_name}}/run_{{cookiecutter.example_shortcut}}.py @@ -118,7 +118,7 @@ class ModelArguments: use_auth_token: bool = field( default=False, metadata={ - "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "help": "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." }, ) diff --git a/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py b/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py index 534b1656d10f3e..01185fdabac527 100644 --- a/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py +++ b/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py @@ -181,7 +181,7 @@ class ModelArguments: default=False, metadata={ "help": ( - "Will use the token generated when running `transformers-cli login` (necessary to use this script " + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, From 88a0ce57bb6f2e76b45b07de3c8c1df832af9d10 Mon Sep 17 00:00:00 2001 From: regisss <15324346+regisss@users.noreply.github.com> Date: Mon, 8 Aug 2022 14:08:11 +0200 Subject: [PATCH 033/539] Add seed setting to image classification example (#18519) --- .../pytorch/image-classification/run_image_classification.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/examples/pytorch/image-classification/run_image_classification.py b/examples/pytorch/image-classification/run_image_classification.py index 2d26e42604da03..28000015ab173a 100644 --- a/examples/pytorch/image-classification/run_image_classification.py +++ b/examples/pytorch/image-classification/run_image_classification.py @@ -43,6 +43,7 @@ HfArgumentParser, Trainer, TrainingArguments, + set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry @@ -214,6 +215,9 @@ def main(): "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) + # Set seed before initializing model. + set_seed(training_args.seed) + # Initialize our dataset and prepare it for the 'image-classification' task. if data_args.dataset_name is not None: dataset = load_dataset( From a4562552eb5efa8a12c61a3a7ebfd687dc72ee19 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Mon, 8 Aug 2022 14:25:56 +0200 Subject: [PATCH 034/539] [DX fix] Fixing QA pipeline streaming a dataset. (#18516) * [DX fix] Fixing QA pipeline streaming a dataset. QuestionAnsweringArgumentHandler would iterate over the whole dataset effectively killing all properties of the pipeline. This restores nice properties when using `Dataset` or `Generator` since those are meant to be consumed lazily. * Handling TF better. --- .../pipelines/question_answering.py | 17 ++++++++++++++++- .../test_pipelines_question_answering.py | 12 ++++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/src/transformers/pipelines/question_answering.py b/src/transformers/pipelines/question_answering.py index 0f5fbf0370e708..d58762035ef7f8 100644 --- a/src/transformers/pipelines/question_answering.py +++ b/src/transformers/pipelines/question_answering.py @@ -1,3 +1,4 @@ +import types import warnings from collections.abc import Iterable from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union @@ -22,8 +23,11 @@ from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING + Dataset = None + if is_torch_available(): import torch + from torch.utils.data import Dataset from ..models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING @@ -82,6 +86,11 @@ def __call__(self, *args, **kwargs): else: raise ValueError(f"Unknown arguments {kwargs}") + # When user is sending a generator we need to trust it's a valid example + generator_types = (types.GeneratorType, Dataset) if Dataset is not None else (types.GeneratorType,) + if isinstance(inputs, generator_types): + return inputs + # Normalize inputs if isinstance(inputs, dict): inputs = [inputs] @@ -245,12 +254,18 @@ def __call__(self, *args, **kwargs): """ # Convert inputs to features + examples = self._args_parser(*args, **kwargs) - if len(examples) == 1: + if isinstance(examples, (list, tuple)) and len(examples) == 1: return super().__call__(examples[0], **kwargs) return super().__call__(examples, **kwargs) def preprocess(self, example, padding="do_not_pad", doc_stride=None, max_question_len=64, max_seq_len=None): + # XXX: This is specal, args_parser will not handle anything generator or dataset like + # For those we expect user to send a simple valid example either directly as a SquadExample or simple dict. + # So we still need a little sanitation here. + if isinstance(example, dict): + example = SquadExample(None, example["question"], example["context"], None, None, None) if max_seq_len is None: max_seq_len = min(self.tokenizer.model_max_length, 384) diff --git a/tests/pipelines/test_pipelines_question_answering.py b/tests/pipelines/test_pipelines_question_answering.py index f34237612c11a9..c3a0da2f2b5e9a 100644 --- a/tests/pipelines/test_pipelines_question_answering.py +++ b/tests/pipelines/test_pipelines_question_answering.py @@ -125,6 +125,18 @@ def test_small_model_pt(self): self.assertEqual(nested_simplify(outputs), {"score": 0.01, "start": 0, "end": 11, "answer": "HuggingFace"}) + @require_torch + def test_small_model_pt_iterator(self): + # https://github.com/huggingface/transformers/issues/18510 + pipe = pipeline(model="sshleifer/tiny-distilbert-base-cased-distilled-squad", batch_size=16, framework="pt") + + def data(): + for i in range(10): + yield {"question": "Where was HuggingFace founded ?", "context": "HuggingFace was founded in Paris."} + + for outputs in pipe(data()): + self.assertEqual(nested_simplify(outputs), {"score": 0.01, "start": 0, "end": 11, "answer": "HuggingFace"}) + @require_torch def test_small_model_pt_softmax_trick(self): question_answerer = pipeline( From 377cdded7af50e7c023dc91bf69f4eef10ac8551 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Mon, 8 Aug 2022 08:48:10 -0400 Subject: [PATCH 035/539] Clean up hub (#18497) * Clean up utils.hub * Remove imports * More fixes * Last fix --- src/transformers/__init__.py | 2 - .../convert_pytorch_checkpoint_to_tf2.py | 19 +- src/transformers/dynamic_module_utils.py | 18 +- src/transformers/file_utils.py | 9 - src/transformers/modelcard.py | 77 +-- src/transformers/modeling_tf_utils.py | 4 +- src/transformers/modeling_utils.py | 4 +- src/transformers/models/rag/retrieval_rag.py | 15 +- .../transfo_xl/tokenization_transfo_xl.py | 17 +- src/transformers/pipelines/__init__.py | 4 +- src/transformers/utils/__init__.py | 9 - src/transformers/utils/hub.py | 535 +----------------- tests/utils/test_file_utils.py | 61 -- utils/check_repo.py | 1 - 14 files changed, 67 insertions(+), 708 deletions(-) diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 5e1e95c6291b78..0a97952b18b85e 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -441,7 +441,6 @@ "TensorType", "add_end_docstrings", "add_start_docstrings", - "cached_path", "is_apex_available", "is_datasets_available", "is_faiss_available", @@ -3214,7 +3213,6 @@ TensorType, add_end_docstrings, add_start_docstrings, - cached_path, is_apex_available, is_datasets_available, is_faiss_available, diff --git a/src/transformers/convert_pytorch_checkpoint_to_tf2.py b/src/transformers/convert_pytorch_checkpoint_to_tf2.py index db7484f4b22701..6a05e40f0f804d 100755 --- a/src/transformers/convert_pytorch_checkpoint_to_tf2.py +++ b/src/transformers/convert_pytorch_checkpoint_to_tf2.py @@ -38,7 +38,6 @@ T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, - WEIGHTS_NAME, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, @@ -91,11 +90,10 @@ XLMConfig, XLMRobertaConfig, XLNetConfig, - cached_path, is_torch_available, load_pytorch_checkpoint_in_tf2_model, ) -from .utils import hf_bucket_url, logging +from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): @@ -311,7 +309,7 @@ def convert_pt_checkpoint_to_tf( # Initialise TF model if config_file in aws_config_map: - config_file = cached_path(aws_config_map[config_file], force_download=not use_cached_models) + config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models) config = config_class.from_json_file(config_file) config.output_hidden_states = True config.output_attentions = True @@ -320,8 +318,9 @@ def convert_pt_checkpoint_to_tf( # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): - pytorch_checkpoint_url = hf_bucket_url(pytorch_checkpoint_path, filename=WEIGHTS_NAME) - pytorch_checkpoint_path = cached_path(pytorch_checkpoint_url, force_download=not use_cached_models) + pytorch_checkpoint_path = cached_file( + pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models + ) # Load PyTorch checkpoint in tf2 model: tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path) @@ -395,14 +394,14 @@ def convert_all_pt_checkpoints_to_tf( print("-" * 100) if config_shortcut_name in aws_config_map: - config_file = cached_path(aws_config_map[config_shortcut_name], force_download=not use_cached_models) + config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models) else: - config_file = cached_path(config_shortcut_name, force_download=not use_cached_models) + config_file = config_shortcut_name if model_shortcut_name in aws_model_maps: - model_file = cached_path(aws_model_maps[model_shortcut_name], force_download=not use_cached_models) + model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models) else: - model_file = cached_path(model_shortcut_name, force_download=not use_cached_models) + model_file = model_shortcut_name if os.path.isfile(model_shortcut_name): model_shortcut_name = "converted_model" diff --git a/src/transformers/dynamic_module_utils.py b/src/transformers/dynamic_module_utils.py index da1434067cbdf8..0c2067cf2e53dd 100644 --- a/src/transformers/dynamic_module_utils.py +++ b/src/transformers/dynamic_module_utils.py @@ -24,14 +24,7 @@ from huggingface_hub import HfFolder, model_info -from .utils import ( - HF_MODULES_CACHE, - TRANSFORMERS_DYNAMIC_MODULE_NAME, - cached_path, - hf_bucket_url, - is_offline_mode, - logging, -) +from .utils import HF_MODULES_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, cached_file, is_offline_mode, logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name @@ -219,18 +212,15 @@ def get_cached_module_file( # Download and cache module_file from the repo `pretrained_model_name_or_path` of grab it if it's a local file. pretrained_model_name_or_path = str(pretrained_model_name_or_path) if os.path.isdir(pretrained_model_name_or_path): - module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file) submodule = "local" else: - module_file_or_url = hf_bucket_url( - pretrained_model_name_or_path, filename=module_file, revision=revision, mirror=None - ) submodule = pretrained_model_name_or_path.replace("/", os.path.sep) try: # Load from URL or cache if already cached - resolved_module_file = cached_path( - module_file_or_url, + resolved_module_file = cached_file( + pretrained_model_name_or_path, + module_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, diff --git a/src/transformers/file_utils.py b/src/transformers/file_utils.py index 94f9a3326d20e4..aa3681e057bb9d 100644 --- a/src/transformers/file_utils.py +++ b/src/transformers/file_utils.py @@ -69,20 +69,14 @@ add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, - cached_path, cached_property, copy_func, default_cache_path, define_sagemaker_information, - filename_to_url, get_cached_models, get_file_from_repo, - get_from_cache, get_full_repo_name, - get_list_of_files, has_file, - hf_bucket_url, - http_get, http_user_agent, is_apex_available, is_coloredlogs_available, @@ -94,7 +88,6 @@ is_in_notebook, is_ipex_available, is_librosa_available, - is_local_clone, is_offline_mode, is_onnx_available, is_pandas_available, @@ -105,7 +98,6 @@ is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, - is_remote_url, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, @@ -141,5 +133,4 @@ torch_only_method, torch_required, torch_version, - url_to_filename, ) diff --git a/src/transformers/modelcard.py b/src/transformers/modelcard.py index c5d07e11473778..dc842c2abbf72c 100644 --- a/src/transformers/modelcard.py +++ b/src/transformers/modelcard.py @@ -43,15 +43,10 @@ ) from .training_args import ParallelMode from .utils import ( - CONFIG_NAME, MODEL_CARD_NAME, - TF2_WEIGHTS_NAME, - WEIGHTS_NAME, - cached_path, - hf_bucket_url, + cached_file, is_datasets_available, is_offline_mode, - is_remote_url, is_tf_available, is_tokenizers_available, is_torch_available, @@ -153,11 +148,6 @@ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. - find_from_standard_name: (*optional*) boolean, default True: - If the pretrained_model_name_or_path ends with our standard model or config filenames, replace them - with our standard modelcard filename. Can be used to directly feed a model/config url and access the - colocated modelcard. - return_unused_kwargs: (*optional*) bool: - If False, then this function returns just the final model card object. @@ -168,21 +158,15 @@ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): Examples: ```python - modelcard = ModelCard.from_pretrained( - "bert-base-uncased" - ) # Download model card from huggingface.co and cache. - modelcard = ModelCard.from_pretrained( - "./test/saved_model/" - ) # E.g. model card was saved using *save_pretrained('./test/saved_model/')* + # Download model card from huggingface.co and cache. + modelcard = ModelCard.from_pretrained("bert-base-uncased") + # Model card was saved using *save_pretrained('./test/saved_model/')* + modelcard = ModelCard.from_pretrained("./test/saved_model/") modelcard = ModelCard.from_pretrained("./test/saved_model/modelcard.json") modelcard = ModelCard.from_pretrained("bert-base-uncased", output_attentions=True, foo=False) ```""" - # This imports every model so let's do it dynamically here. - from transformers.models.auto.configuration_auto import ALL_PRETRAINED_CONFIG_ARCHIVE_MAP - cache_dir = kwargs.pop("cache_dir", None) proxies = kwargs.pop("proxies", None) - find_from_standard_name = kwargs.pop("find_from_standard_name", True) return_unused_kwargs = kwargs.pop("return_unused_kwargs", False) from_pipeline = kwargs.pop("_from_pipeline", None) @@ -190,37 +174,30 @@ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): if from_pipeline is not None: user_agent["using_pipeline"] = from_pipeline - if pretrained_model_name_or_path in ALL_PRETRAINED_CONFIG_ARCHIVE_MAP: - # For simplicity we use the same pretrained url than the configuration files - # but with a different suffix (modelcard.json). This suffix is replaced below. - model_card_file = ALL_PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path] - elif os.path.isdir(pretrained_model_name_or_path): - model_card_file = os.path.join(pretrained_model_name_or_path, MODEL_CARD_NAME) - elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): - model_card_file = pretrained_model_name_or_path + is_local = os.path.isdir(pretrained_model_name_or_path) + if os.path.isfile(pretrained_model_name_or_path): + resolved_model_card_file = pretrained_model_name_or_path + is_local = True else: - model_card_file = hf_bucket_url(pretrained_model_name_or_path, filename=MODEL_CARD_NAME, mirror=None) - - if find_from_standard_name or pretrained_model_name_or_path in ALL_PRETRAINED_CONFIG_ARCHIVE_MAP: - model_card_file = model_card_file.replace(CONFIG_NAME, MODEL_CARD_NAME) - model_card_file = model_card_file.replace(WEIGHTS_NAME, MODEL_CARD_NAME) - model_card_file = model_card_file.replace(TF2_WEIGHTS_NAME, MODEL_CARD_NAME) - - try: - # Load from URL or cache if already cached - resolved_model_card_file = cached_path( - model_card_file, cache_dir=cache_dir, proxies=proxies, user_agent=user_agent - ) - if resolved_model_card_file == model_card_file: - logger.info(f"loading model card file {model_card_file}") - else: - logger.info(f"loading model card file {model_card_file} from cache at {resolved_model_card_file}") - # Load model card - modelcard = cls.from_json_file(resolved_model_card_file) + try: + # Load from URL or cache if already cached + resolved_model_card_file = cached_file( + pretrained_model_name_or_path, + filename=MODEL_CARD_NAME, + cache_dir=cache_dir, + proxies=proxies, + user_agent=user_agent, + ) + if is_local: + logger.info(f"loading model card file {resolved_model_card_file}") + else: + logger.info(f"loading model card file {MODEL_CARD_NAME} from cache at {resolved_model_card_file}") + # Load model card + modelcard = cls.from_json_file(resolved_model_card_file) - except (EnvironmentError, json.JSONDecodeError): - # We fall back on creating an empty model card - modelcard = cls() + except (EnvironmentError, json.JSONDecodeError): + # We fall back on creating an empty model card + modelcard = cls() # Update model card with kwargs if needed to_remove = [] diff --git a/src/transformers/modeling_tf_utils.py b/src/transformers/modeling_tf_utils.py index 354bd9592f30cd..703440d80ad71b 100644 --- a/src/transformers/modeling_tf_utils.py +++ b/src/transformers/modeling_tf_utils.py @@ -2156,7 +2156,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): use_auth_token = kwargs.pop("use_auth_token", None) revision = kwargs.pop("revision", None) trust_remote_code = kwargs.pop("trust_remote_code", None) - mirror = kwargs.pop("mirror", None) + _ = kwargs.pop("mirror", None) load_weight_prefix = kwargs.pop("load_weight_prefix", None) from_pipeline = kwargs.pop("_from_pipeline", None) from_auto_class = kwargs.pop("_from_auto", False) @@ -2270,7 +2270,6 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): # message. has_file_kwargs = { "revision": revision, - "mirror": mirror, "proxies": proxies, "use_auth_token": use_auth_token, } @@ -2321,7 +2320,6 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): use_auth_token=use_auth_token, user_agent=user_agent, revision=revision, - mirror=mirror, ) config.name_or_path = pretrained_model_name_or_path diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index 771f1d2d5d84c0..2a86128c221bec 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -1784,7 +1784,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P use_auth_token = kwargs.pop("use_auth_token", None) revision = kwargs.pop("revision", None) trust_remote_code = kwargs.pop("trust_remote_code", None) - mirror = kwargs.pop("mirror", None) + _ = kwargs.pop("mirror", None) from_pipeline = kwargs.pop("_from_pipeline", None) from_auto_class = kwargs.pop("_from_auto", False) _fast_init = kwargs.pop("_fast_init", True) @@ -1955,7 +1955,6 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P # message. has_file_kwargs = { "revision": revision, - "mirror": mirror, "proxies": proxies, "use_auth_token": use_auth_token, } @@ -2012,7 +2011,6 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P use_auth_token=use_auth_token, user_agent=user_agent, revision=revision, - mirror=mirror, subfolder=subfolder, ) diff --git a/src/transformers/models/rag/retrieval_rag.py b/src/transformers/models/rag/retrieval_rag.py index 7a3c5635f24f9b..797c1a7332acf7 100644 --- a/src/transformers/models/rag/retrieval_rag.py +++ b/src/transformers/models/rag/retrieval_rag.py @@ -23,7 +23,7 @@ from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding -from ...utils import cached_path, is_datasets_available, is_faiss_available, is_remote_url, logging, requires_backends +from ...utils import cached_file, is_datasets_available, is_faiss_available, logging, requires_backends from .configuration_rag import RagConfig from .tokenization_rag import RagTokenizer @@ -111,22 +111,21 @@ def __init__(self, vector_size, index_path): self._index_initialized = False def _resolve_path(self, index_path, filename): - assert os.path.isdir(index_path) or is_remote_url(index_path), "Please specify a valid `index_path`." - archive_file = os.path.join(index_path, filename) + is_local = os.path.isdir(index_path) try: # Load from URL or cache if already cached - resolved_archive_file = cached_path(archive_file) + resolved_archive_file = cached_file(index_path, filename) except EnvironmentError: msg = ( - f"Can't load '{archive_file}'. Make sure that:\n\n" + f"Can't load '{filename}'. Make sure that:\n\n" f"- '{index_path}' is a correct remote path to a directory containing a file named {filename}\n\n" f"- or '{index_path}' is the correct path to a directory containing a file named {filename}.\n\n" ) raise EnvironmentError(msg) - if resolved_archive_file == archive_file: - logger.info(f"loading file {archive_file}") + if is_local: + logger.info(f"loading file {resolved_archive_file}") else: - logger.info(f"loading file {archive_file} from cache at {resolved_archive_file}") + logger.info(f"loading file {filename} from cache at {resolved_archive_file}") return resolved_archive_file def _load_passages(self): diff --git a/src/transformers/models/transfo_xl/tokenization_transfo_xl.py b/src/transformers/models/transfo_xl/tokenization_transfo_xl.py index 92bcfe83199e8d..5b284a219a4753 100644 --- a/src/transformers/models/transfo_xl/tokenization_transfo_xl.py +++ b/src/transformers/models/transfo_xl/tokenization_transfo_xl.py @@ -29,7 +29,7 @@ from ...tokenization_utils import PreTrainedTokenizer from ...utils import ( - cached_path, + cached_file, is_sacremoses_available, is_torch_available, logging, @@ -681,24 +681,21 @@ def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, Instantiate a pre-processed corpus. """ vocab = TransfoXLTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs) - if pretrained_model_name_or_path in PRETRAINED_CORPUS_ARCHIVE_MAP: - corpus_file = PRETRAINED_CORPUS_ARCHIVE_MAP[pretrained_model_name_or_path] - else: - corpus_file = os.path.join(pretrained_model_name_or_path, CORPUS_NAME) + is_local = os.path.isdir(pretrained_model_name_or_path) # redirect to the cache, if necessary try: - resolved_corpus_file = cached_path(corpus_file, cache_dir=cache_dir) + resolved_corpus_file = cached_file(pretrained_model_name_or_path, CORPUS_NAME, cache_dir=cache_dir) except EnvironmentError: logger.error( f"Corpus '{pretrained_model_name_or_path}' was not found in corpus list" f" ({', '.join(PRETRAINED_CORPUS_ARCHIVE_MAP.keys())}. We assumed '{pretrained_model_name_or_path}'" - f" was a path or url but couldn't find files {corpus_file} at this path or url." + f" was a path or url but couldn't find files {CORPUS_NAME} at this path or url." ) return None - if resolved_corpus_file == corpus_file: - logger.info(f"loading corpus file {corpus_file}") + if is_local: + logger.info(f"loading corpus file {resolved_corpus_file}") else: - logger.info(f"loading corpus file {corpus_file} from cache at {resolved_corpus_file}") + logger.info(f"loading corpus file {CORPUS_NAME} from cache at {resolved_corpus_file}") # Instantiate tokenizer. corpus = cls(*inputs, **kwargs) diff --git a/src/transformers/pipelines/__init__.py b/src/transformers/pipelines/__init__.py index 104726bbd8cc7a..dfa75768d8f811 100755 --- a/src/transformers/pipelines/__init__.py +++ b/src/transformers/pipelines/__init__.py @@ -25,6 +25,8 @@ from numpy import isin +from huggingface_hub.file_download import http_get + from ..configuration_utils import PretrainedConfig from ..dynamic_module_utils import get_class_from_dynamic_module from ..feature_extraction_utils import PreTrainedFeatureExtractor @@ -33,7 +35,7 @@ from ..models.auto.tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer from ..tokenization_utils import PreTrainedTokenizer from ..tokenization_utils_fast import PreTrainedTokenizerFast -from ..utils import HUGGINGFACE_CO_RESOLVE_ENDPOINT, http_get, is_tf_available, is_torch_available, logging +from ..utils import HUGGINGFACE_CO_RESOLVE_ENDPOINT, is_tf_available, is_torch_available, logging from .audio_classification import AudioClassificationPipeline from .automatic_speech_recognition import AutomaticSpeechRecognitionPipeline from .base import ( diff --git a/src/transformers/utils/__init__.py b/src/transformers/utils/__init__.py index 023dffc27a703b..27276aa4946d5e 100644 --- a/src/transformers/utils/__init__.py +++ b/src/transformers/utils/__init__.py @@ -61,25 +61,16 @@ RepositoryNotFoundError, RevisionNotFoundError, cached_file, - cached_path, default_cache_path, define_sagemaker_information, - filename_to_url, get_cached_models, get_file_from_repo, - get_from_cache, get_full_repo_name, - get_list_of_files, has_file, - hf_bucket_url, - http_get, http_user_agent, - is_local_clone, is_offline_mode, - is_remote_url, move_cache, send_example_telemetry, - url_to_filename, ) from .import_utils import ( ENV_VARS_TRUE_AND_AUTO_VALUES, diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py index 1aa086da6721ec..5f9421912c49d7 100644 --- a/src/transformers/utils/hub.py +++ b/src/transformers/utils/hub.py @@ -14,44 +14,32 @@ """ Hub utilities: utilities related to download and cache models """ -import copy -import fnmatch -import io import json import os import re import shutil -import subprocess import sys -import tarfile -import tempfile import traceback import warnings from contextlib import contextmanager -from functools import partial -from hashlib import sha256 from pathlib import Path -from typing import BinaryIO, Dict, List, Optional, Tuple, Union -from urllib.parse import urlparse +from typing import Dict, List, Optional, Tuple, Union from uuid import uuid4 -from zipfile import ZipFile, is_zipfile import huggingface_hub import requests -from filelock import FileLock from huggingface_hub import ( CommitOperationAdd, HfFolder, create_commit, create_repo, hf_hub_download, - list_repo_files, + hf_hub_url, whoami, ) from huggingface_hub.constants import HUGGINGFACE_HEADER_X_LINKED_ETAG, HUGGINGFACE_HEADER_X_REPO_COMMIT from huggingface_hub.utils import EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError from requests.exceptions import HTTPError -from requests.models import Response from transformers.utils.logging import tqdm from . import __version__, logging @@ -128,93 +116,6 @@ def is_offline_mode(): HUGGINGFACE_CO_EXAMPLES_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/examples" -def is_remote_url(url_or_filename): - parsed = urlparse(url_or_filename) - return parsed.scheme in ("http", "https") - - -def hf_bucket_url( - model_id: str, filename: str, subfolder: Optional[str] = None, revision: Optional[str] = None, mirror=None -) -> str: - """ - Resolve a model identifier, a file name, and an optional revision id, to a huggingface.co-hosted url, redirecting - to Cloudfront (a Content Delivery Network, or CDN) for large files. - - Cloudfront is replicated over the globe so downloads are way faster for the end user (and it also lowers our - bandwidth costs). - - Cloudfront aggressively caches files by default (default TTL is 24 hours), however this is not an issue here - because we migrated to a git-based versioning system on huggingface.co, so we now store the files on S3/Cloudfront - in a content-addressable way (i.e., the file name is its hash). Using content-addressable filenames means cache - can't ever be stale. - - In terms of client-side caching from this library, we base our caching on the objects' ETag. An object' ETag is: - its sha1 if stored in git, or its sha256 if stored in git-lfs. Files cached locally from transformers before v3.5.0 - are not shared with those new files, because the cached file's name contains a hash of the url (which changed). - """ - if subfolder is not None: - filename = f"{subfolder}/{filename}" - - if mirror: - if mirror in ["tuna", "bfsu"]: - raise ValueError("The Tuna and BFSU mirrors are no longer available. Try removing the mirror argument.") - legacy_format = "/" not in model_id - if legacy_format: - return f"{mirror}/{model_id}-{filename}" - else: - return f"{mirror}/{model_id}/{filename}" - - if revision is None: - revision = "main" - return HUGGINGFACE_CO_PREFIX.format(model_id=model_id, revision=revision, filename=filename) - - -def url_to_filename(url: str, etag: Optional[str] = None) -> str: - """ - Convert `url` into a hashed filename in a repeatable way. If `etag` is specified, append its hash to the url's, - delimited by a period. If the url ends with .h5 (Keras HDF5 weights) adds '.h5' to the name so that TF 2.0 can - identify it as a HDF5 file (see - https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1380) - """ - url_bytes = url.encode("utf-8") - filename = sha256(url_bytes).hexdigest() - - if etag: - etag_bytes = etag.encode("utf-8") - filename += "." + sha256(etag_bytes).hexdigest() - - if url.endswith(".h5"): - filename += ".h5" - - return filename - - -def filename_to_url(filename, cache_dir=None): - """ - Return the url and etag (which may be `None`) stored for *filename*. Raise `EnvironmentError` if *filename* or its - stored metadata do not exist. - """ - if cache_dir is None: - cache_dir = TRANSFORMERS_CACHE - if isinstance(cache_dir, Path): - cache_dir = str(cache_dir) - - cache_path = os.path.join(cache_dir, filename) - if not os.path.exists(cache_path): - raise EnvironmentError(f"file {cache_path} not found") - - meta_path = cache_path + ".json" - if not os.path.exists(meta_path): - raise EnvironmentError(f"file {meta_path} not found") - - with open(meta_path, encoding="utf-8") as meta_file: - metadata = json.load(meta_file) - url = metadata["url"] - etag = metadata["etag"] - - return url, etag - - def get_cached_models(cache_dir: Union[str, Path] = None) -> List[Tuple]: """ Returns a list of tuples representing model binaries that are cached locally. Each tuple has shape `(model_url, @@ -248,108 +149,6 @@ def get_cached_models(cache_dir: Union[str, Path] = None) -> List[Tuple]: return cached_models -def cached_path( - url_or_filename, - cache_dir=None, - force_download=False, - proxies=None, - resume_download=False, - user_agent: Union[Dict, str, None] = None, - extract_compressed_file=False, - force_extract=False, - use_auth_token: Union[bool, str, None] = None, - local_files_only=False, -) -> Optional[str]: - """ - Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file - and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and - then return the path - - Args: - cache_dir: specify a cache directory to save the file to (overwrite the default cache dir). - force_download: if True, re-download the file even if it's already cached in the cache dir. - resume_download: if True, resume the download if incompletely received file is found. - user_agent: Optional string or dict that will be appended to the user-agent on remote requests. - use_auth_token: Optional string or boolean to use as Bearer token for remote files. If True, - will get token from ~/.huggingface. - extract_compressed_file: if True and the path point to a zip or tar file, extract the compressed - file in a folder along the archive. - force_extract: if True when extract_compressed_file is True and the archive was already extracted, - re-extract the archive and override the folder where it was extracted. - - Return: - Local path (string) of file or if networking is off, last version of file cached on disk. - - Raises: - In case of non-recoverable file (non-existent or inaccessible url + no cache on disk). - """ - if cache_dir is None: - cache_dir = TRANSFORMERS_CACHE - if isinstance(url_or_filename, Path): - url_or_filename = str(url_or_filename) - if isinstance(cache_dir, Path): - cache_dir = str(cache_dir) - - if is_offline_mode() and not local_files_only: - logger.info("Offline mode: forcing local_files_only=True") - local_files_only = True - - if is_remote_url(url_or_filename): - # URL, so get it from the cache (downloading if necessary) - output_path = get_from_cache( - url_or_filename, - cache_dir=cache_dir, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - user_agent=user_agent, - use_auth_token=use_auth_token, - local_files_only=local_files_only, - ) - elif os.path.exists(url_or_filename): - # File, and it exists. - output_path = url_or_filename - elif urlparse(url_or_filename).scheme == "": - # File, but it doesn't exist. - raise EnvironmentError(f"file {url_or_filename} not found") - else: - # Something unknown - raise ValueError(f"unable to parse {url_or_filename} as a URL or as a local path") - - if extract_compressed_file: - if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path): - return output_path - - # Path where we extract compressed archives - # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" - output_dir, output_file = os.path.split(output_path) - output_extract_dir_name = output_file.replace(".", "-") + "-extracted" - output_path_extracted = os.path.join(output_dir, output_extract_dir_name) - - if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract: - return output_path_extracted - - # Prevent parallel extractions - lock_path = output_path + ".lock" - with FileLock(lock_path): - shutil.rmtree(output_path_extracted, ignore_errors=True) - os.makedirs(output_path_extracted) - if is_zipfile(output_path): - with ZipFile(output_path, "r") as zip_file: - zip_file.extractall(output_path_extracted) - zip_file.close() - elif tarfile.is_tarfile(output_path): - tar_file = tarfile.open(output_path) - tar_file.extractall(output_path_extracted) - tar_file.close() - else: - raise EnvironmentError(f"Archive format of {output_path} could not be identified") - - return output_path_extracted - - return output_path - - def define_sagemaker_information(): try: instance_data = requests.get(os.environ["ECS_CONTAINER_METADATA_URI"]).json() @@ -399,234 +198,6 @@ def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str: return ua -def _raise_for_status(response: Response): - """ - Internal version of `request.raise_for_status()` that will refine a potential HTTPError. - """ - if "X-Error-Code" in response.headers: - error_code = response.headers["X-Error-Code"] - if error_code == "RepoNotFound": - raise RepositoryNotFoundError(f"404 Client Error: Repository Not Found for url: {response.url}") - elif error_code == "EntryNotFound": - raise EntryNotFoundError(f"404 Client Error: Entry Not Found for url: {response.url}") - elif error_code == "RevisionNotFound": - raise RevisionNotFoundError(f"404 Client Error: Revision Not Found for url: {response.url}") - - if response.status_code == 401: - # The repo was not found and the user is not Authenticated - raise RepositoryNotFoundError( - f"401 Client Error: Repository not found for url: {response.url}. " - "If the repo is private, make sure you are authenticated." - ) - - response.raise_for_status() - - -def http_get( - url: str, - temp_file: BinaryIO, - proxies=None, - resume_size=0, - headers: Optional[Dict[str, str]] = None, - file_name: Optional[str] = None, -): - """ - Download remote file. Do not gobble up errors. - """ - headers = copy.deepcopy(headers) - if resume_size > 0: - headers["Range"] = f"bytes={resume_size}-" - r = requests.get(url, stream=True, proxies=proxies, headers=headers) - _raise_for_status(r) - content_length = r.headers.get("Content-Length") - total = resume_size + int(content_length) if content_length is not None else None - # `tqdm` behavior is determined by `utils.logging.is_progress_bar_enabled()` - # and can be set using `utils.logging.enable/disable_progress_bar()` - progress = tqdm( - unit="B", - unit_scale=True, - unit_divisor=1024, - total=total, - initial=resume_size, - desc=f"Downloading {file_name}" if file_name is not None else "Downloading", - ) - for chunk in r.iter_content(chunk_size=1024): - if chunk: # filter out keep-alive new chunks - progress.update(len(chunk)) - temp_file.write(chunk) - progress.close() - - -def get_from_cache( - url: str, - cache_dir=None, - force_download=False, - proxies=None, - etag_timeout=10, - resume_download=False, - user_agent: Union[Dict, str, None] = None, - use_auth_token: Union[bool, str, None] = None, - local_files_only=False, -) -> Optional[str]: - """ - Given a URL, look for the corresponding file in the local cache. If it's not there, download it. Then return the - path to the cached file. - - Return: - Local path (string) of file or if networking is off, last version of file cached on disk. - - Raises: - In case of non-recoverable file (non-existent or inaccessible url + no cache on disk). - """ - if cache_dir is None: - cache_dir = TRANSFORMERS_CACHE - if isinstance(cache_dir, Path): - cache_dir = str(cache_dir) - - os.makedirs(cache_dir, exist_ok=True) - - headers = {"user-agent": http_user_agent(user_agent)} - if isinstance(use_auth_token, str): - headers["authorization"] = f"Bearer {use_auth_token}" - elif use_auth_token: - token = HfFolder.get_token() - if token is None: - raise EnvironmentError("You specified use_auth_token=True, but a huggingface token was not found.") - headers["authorization"] = f"Bearer {token}" - - url_to_download = url - etag = None - if not local_files_only: - try: - r = requests.head(url, headers=headers, allow_redirects=False, proxies=proxies, timeout=etag_timeout) - _raise_for_status(r) - etag = r.headers.get("X-Linked-Etag") or r.headers.get("ETag") - # We favor a custom header indicating the etag of the linked resource, and - # we fallback to the regular etag header. - # If we don't have any of those, raise an error. - if etag is None: - raise OSError( - "Distant resource does not have an ETag, we won't be able to reliably ensure reproducibility." - ) - # In case of a redirect, - # save an extra redirect on the request.get call, - # and ensure we download the exact atomic version even if it changed - # between the HEAD and the GET (unlikely, but hey). - if 300 <= r.status_code <= 399: - url_to_download = r.headers["Location"] - except ( - requests.exceptions.SSLError, - requests.exceptions.ProxyError, - RepositoryNotFoundError, - EntryNotFoundError, - RevisionNotFoundError, - ): - # Actually raise for those subclasses of ConnectionError - # Also raise the custom errors coming from a non existing repo/branch/file as they are caught later on. - raise - except (HTTPError, requests.exceptions.ConnectionError, requests.exceptions.Timeout): - # Otherwise, our Internet connection is down. - # etag is None - pass - - filename = url_to_filename(url, etag) - - # get cache path to put the file - cache_path = os.path.join(cache_dir, filename) - - # etag is None == we don't have a connection or we passed local_files_only. - # try to get the last downloaded one - if etag is None: - if os.path.exists(cache_path): - return cache_path - else: - matching_files = [ - file - for file in fnmatch.filter(os.listdir(cache_dir), filename.split(".")[0] + ".*") - if not file.endswith(".json") and not file.endswith(".lock") - ] - if len(matching_files) > 0: - return os.path.join(cache_dir, matching_files[-1]) - else: - # If files cannot be found and local_files_only=True, - # the models might've been found if local_files_only=False - # Notify the user about that - if local_files_only: - fname = url.split("/")[-1] - raise EntryNotFoundError( - f"Cannot find the requested file ({fname}) in the cached path and outgoing traffic has been" - " disabled. To enable model look-ups and downloads online, set 'local_files_only'" - " to False." - ) - else: - raise ValueError( - "Connection error, and we cannot find the requested files in the cached path." - " Please try again or make sure your Internet connection is on." - ) - - # From now on, etag is not None. - if os.path.exists(cache_path) and not force_download: - return cache_path - - # Prevent parallel downloads of the same file with a lock. - lock_path = cache_path + ".lock" - with FileLock(lock_path): - - # If the download just completed while the lock was activated. - if os.path.exists(cache_path) and not force_download: - # Even if returning early like here, the lock will be released. - return cache_path - - if resume_download: - incomplete_path = cache_path + ".incomplete" - - @contextmanager - def _resumable_file_manager() -> "io.BufferedWriter": - with open(incomplete_path, "ab") as f: - yield f - - temp_file_manager = _resumable_file_manager - if os.path.exists(incomplete_path): - resume_size = os.stat(incomplete_path).st_size - else: - resume_size = 0 - else: - temp_file_manager = partial(tempfile.NamedTemporaryFile, mode="wb", dir=cache_dir, delete=False) - resume_size = 0 - - # Download to temporary file, then copy to cache dir once finished. - # Otherwise you get corrupt cache entries if the download gets interrupted. - with temp_file_manager() as temp_file: - logger.info(f"{url} not found in cache or force_download set to True, downloading to {temp_file.name}") - - # The url_to_download might be messy, so we extract the file name from the original url. - file_name = url.split("/")[-1] - http_get( - url_to_download, - temp_file, - proxies=proxies, - resume_size=resume_size, - headers=headers, - file_name=file_name, - ) - - logger.info(f"storing {url} in cache at {cache_path}") - os.replace(temp_file.name, cache_path) - - # NamedTemporaryFile creates a file with hardwired 0600 perms (ignoring umask), so fixing it. - umask = os.umask(0o666) - os.umask(umask) - os.chmod(cache_path, 0o666 & ~umask) - - logger.info(f"creating metadata file for {cache_path}") - meta = {"url": url, "etag": etag} - meta_path = cache_path + ".json" - with open(meta_path, "w") as meta_file: - json.dump(meta, meta_file) - - return cache_path - - def try_to_load_from_cache(cache_dir, repo_id, filename, revision=None): """ Explores the cache to return the latest cached file for a given revision. @@ -919,7 +490,6 @@ def has_file( path_or_repo: Union[str, os.PathLike], filename: str, revision: Optional[str] = None, - mirror: Optional[str] = None, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, ): @@ -936,7 +506,7 @@ def has_file( if os.path.isdir(path_or_repo): return os.path.isfile(os.path.join(path_or_repo, filename)) - url = hf_bucket_url(path_or_repo, filename=filename, revision=revision, mirror=mirror) + url = hf_hub_url(path_or_repo, filename=filename, revision=revision) headers = {"user-agent": http_user_agent()} if isinstance(use_auth_token, str): @@ -965,89 +535,6 @@ def has_file( return False -def get_list_of_files( - path_or_repo: Union[str, os.PathLike], - revision: Optional[str] = None, - use_auth_token: Optional[Union[bool, str]] = None, - local_files_only: bool = False, -) -> List[str]: - """ - Gets the list of files inside `path_or_repo`. - - Args: - path_or_repo (`str` or `os.PathLike`): - Can be either the id of a repo on huggingface.co or a path to a *directory*. - revision (`str`, *optional*, defaults to `"main"`): - The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a - git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any - identifier allowed by git. - use_auth_token (`str` or *bool*, *optional*): - The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated - when running `huggingface-cli login` (stored in `~/.huggingface`). - local_files_only (`bool`, *optional*, defaults to `False`): - Whether or not to only rely on local files and not to attempt to download any files. - - - - This API is not optimized, so calling it a lot may result in connection errors. - - - - Returns: - `List[str]`: The list of files available in `path_or_repo`. - """ - path_or_repo = str(path_or_repo) - # If path_or_repo is a folder, we just return what is inside (subdirectories included). - if os.path.isdir(path_or_repo): - list_of_files = [] - for path, dir_names, file_names in os.walk(path_or_repo): - list_of_files.extend([os.path.join(path, f) for f in file_names]) - return list_of_files - - # Can't grab the files if we are on offline mode. - if is_offline_mode() or local_files_only: - return [] - - # Otherwise we grab the token and use the list_repo_files method. - if isinstance(use_auth_token, str): - token = use_auth_token - elif use_auth_token is True: - token = HfFolder.get_token() - else: - token = None - - try: - return list_repo_files(path_or_repo, revision=revision, token=token) - except HTTPError as e: - raise ValueError( - f"{path_or_repo} is not a local path or a model identifier on the model Hub. Did you make a typo?" - ) from e - - -def is_local_clone(repo_path, repo_url): - """ - Checks if the folder in `repo_path` is a local clone of `repo_url`. - """ - # First double-check that `repo_path` is a git repo - if not os.path.exists(os.path.join(repo_path, ".git")): - return False - test_git = subprocess.run("git branch".split(), cwd=repo_path) - if test_git.returncode != 0: - return False - - # Then look at its remotes - remotes = subprocess.run( - "git remote -v".split(), - stderr=subprocess.PIPE, - stdout=subprocess.PIPE, - check=True, - encoding="utf-8", - cwd=repo_path, - ).stdout - - return repo_url in remotes.split() - - class PushToHubMixin: """ A Mixin containing the functionality to push a model or tokenizer to the hub. @@ -1310,7 +797,6 @@ def get_checkpoint_shard_files( use_auth_token=None, user_agent=None, revision=None, - mirror=None, subfolder="", ): """ @@ -1343,18 +829,11 @@ def get_checkpoint_shard_files( # At this stage pretrained_model_name_or_path is a model identifier on the Hub cached_filenames = [] for shard_filename in shard_filenames: - shard_url = hf_bucket_url( - pretrained_model_name_or_path, - filename=shard_filename, - revision=revision, - mirror=mirror, - subfolder=subfolder if len(subfolder) > 0 else None, - ) - try: # Load from URL - cached_filename = cached_path( - shard_url, + cached_filename = cached_file( + pretrained_model_name_or_path, + shard_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, @@ -1362,6 +841,8 @@ def get_checkpoint_shard_files( local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, + revision=revision, + subfolder=subfolder, ) # We have already dealt with RepositoryNotFoundError and RevisionNotFoundError when getting the index, so # we don't have to catch them here. diff --git a/tests/utils/test_file_utils.py b/tests/utils/test_file_utils.py index 19adfe21dd4bf6..60676e9f7d9d37 100644 --- a/tests/utils/test_file_utils.py +++ b/tests/utils/test_file_utils.py @@ -26,20 +26,13 @@ from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER from transformers.utils import ( - CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, WEIGHTS_NAME, ContextManagers, - EntryNotFoundError, - RepositoryNotFoundError, - RevisionNotFoundError, - filename_to_url, find_labels, get_file_from_repo, - get_from_cache, has_file, - hf_bucket_url, is_flax_available, is_tf_available, is_torch_available, @@ -85,60 +78,6 @@ def test_module_spec_available(self): class GetFromCacheTests(unittest.TestCase): - def test_bogus_url(self): - # This lets us simulate no connection - # as the error raised is the same - # `ConnectionError` - url = "https://bogus" - with self.assertRaisesRegex(ValueError, "Connection error"): - _ = get_from_cache(url) - - def test_file_not_found(self): - # Valid revision (None) but missing file. - url = hf_bucket_url(MODEL_ID, filename="missing.bin") - with self.assertRaisesRegex(EntryNotFoundError, "404 Client Error"): - _ = get_from_cache(url) - - def test_model_not_found_not_authenticated(self): - # Invalid model id. - url = hf_bucket_url("bert-base", filename="pytorch_model.bin") - with self.assertRaisesRegex(RepositoryNotFoundError, "401 Client Error"): - _ = get_from_cache(url) - - @unittest.skip("No authentication when testing against prod") - def test_model_not_found_authenticated(self): - # Invalid model id. - url = hf_bucket_url("bert-base", filename="pytorch_model.bin") - with self.assertRaisesRegex(RepositoryNotFoundError, "404 Client Error"): - _ = get_from_cache(url, use_auth_token="hf_sometoken") - # ^ TODO - if we decide to unskip this: use a real / functional token - - def test_revision_not_found(self): - # Valid file but missing revision - url = hf_bucket_url(MODEL_ID, filename=CONFIG_NAME, revision=REVISION_ID_INVALID) - with self.assertRaisesRegex(RevisionNotFoundError, "404 Client Error"): - _ = get_from_cache(url) - - def test_standard_object(self): - url = hf_bucket_url(MODEL_ID, filename=CONFIG_NAME, revision=REVISION_ID_DEFAULT) - filepath = get_from_cache(url, force_download=True) - metadata = filename_to_url(filepath) - self.assertEqual(metadata, (url, f'"{PINNED_SHA1}"')) - - def test_standard_object_rev(self): - # Same object, but different revision - url = hf_bucket_url(MODEL_ID, filename=CONFIG_NAME, revision=REVISION_ID_ONE_SPECIFIC_COMMIT) - filepath = get_from_cache(url, force_download=True) - metadata = filename_to_url(filepath) - self.assertNotEqual(metadata[1], f'"{PINNED_SHA1}"') - # Caution: check that the etag is *not* equal to the one from `test_standard_object` - - def test_lfs_object(self): - url = hf_bucket_url(MODEL_ID, filename=WEIGHTS_NAME, revision=REVISION_ID_DEFAULT) - filepath = get_from_cache(url, force_download=True) - metadata = filename_to_url(filepath) - self.assertEqual(metadata, (url, f'"{PINNED_SHA256}"')) - def test_has_file(self): self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME)) self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME)) diff --git a/utils/check_repo.py b/utils/check_repo.py index 00cc6a048b9b2d..d2271e87ebf178 100644 --- a/utils/check_repo.py +++ b/utils/check_repo.py @@ -614,7 +614,6 @@ def find_all_documented_objects(): "absl", # External module "add_end_docstrings", # Internal, should never have been in the main init. "add_start_docstrings", # Internal, should never have been in the main init. - "cached_path", # Internal used for downloading models. "convert_tf_weight_name_to_pt_weight_name", # Internal used to convert model weights "logger", # Internal logger "logging", # External module From 2fecde742db1b08e402eb6b11cfc3d80f2ec8a21 Mon Sep 17 00:00:00 2001 From: Sourab Mangrulkar <13534540+pacman100@users.noreply.github.com> Date: Mon, 8 Aug 2022 18:56:51 +0530 Subject: [PATCH 036/539] update fsdp docs (#18521) * updating fsdp documentation * typo fix --- docs/source/en/main_classes/trainer.mdx | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/docs/source/en/main_classes/trainer.mdx b/docs/source/en/main_classes/trainer.mdx index e5807bd1380f8a..44c9d1d4b01973 100644 --- a/docs/source/en/main_classes/trainer.mdx +++ b/docs/source/en/main_classes/trainer.mdx @@ -567,14 +567,22 @@ as the model saving with FSDP activated is only available with recent fixes. For this, add `--fsdp full_shard` to the command line arguments. - SHARD_GRAD_OP : Shards optimizer states + gradients across data parallel workers/GPUs. For this, add `--fsdp shard_grad_op` to the command line arguments. + - NO_SHARD : No sharding. For this, add `--fsdp no_shard` to the command line arguments. - To offload the parameters and gradients to the CPU, add `--fsdp "full_shard offload"` or `--fsdp "shard_grad_op offload"` to the command line arguments. - To automatically recursively wrap layers with FSDP using `default_auto_wrap_policy`, add `--fsdp "full_shard auto_wrap"` or `--fsdp "shard_grad_op auto_wrap"` to the command line arguments. - To enable both CPU offloading and auto wrapping, add `--fsdp "full_shard offload auto_wrap"` or `--fsdp "shard_grad_op offload auto_wrap"` to the command line arguments. -- If auto wrapping is enabled, please add `--fsdp_min_num_params ` to command line arguments. -It specifies FSDP's minimum number of parameters for Default Auto Wrapping. +- If auto wrapping is enabled, you can either use transformer based auto wrap policy or size based auto wrap policy. + - For transformer based auto wrap policy, please add `--fsdp_transformer_layer_cls_to_wrap ` to command line arguments. + This specifies the transformer layer class name (case-sensitive) to wrap ,e.g, `BertLayer`, `GPTJBlock`, `T5Block` .... + This is important because submodules that share weights (e.g., embedding layer) should not end up in different FSDP wrapped units. + Using this policy, wrapping happens for each block containing Multi-Head Attention followed by couple of MLP layers. + Remaining layers including the shared embeddings are conviniently wrapped in same outermost FSDP unit. + Therefore, use this for transformer based models. + - For size based auto wrap policy, please add `--fsdp_min_num_params ` to command line arguments. + It specifies FSDP's minimum number of parameters for auto wrapping. **Few caveats to be aware of** - Mixed precision is currently not supported with FSDP as we wait for PyTorch to fix support for it. From 70b0d4e193ea3d15effebc7cda534b6b9454abef Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Mon, 8 Aug 2022 09:53:08 -0400 Subject: [PATCH 037/539] Fix compatibility with 1.12 (#17925) * Fix compatibility with 1.12 * Remove pin from examples requirements * Update torch scatter version * Fix compatibility with 1.12 * Remove pin from examples requirements * Update torch scatter version * fix torch.onnx.symbolic_opset12 import * Reject bad version Co-authored-by: ydshieh --- .circleci/config.yml | 16 ++++++++-------- examples/pytorch/_tests_requirements.txt | 1 - setup.py | 2 +- src/transformers/dependency_versions_table.py | 2 +- .../models/deberta/modeling_deberta.py | 4 +++- .../models/deberta_v2/modeling_deberta_v2.py | 4 +++- src/transformers/models/sew_d/modeling_sew_d.py | 4 +++- 7 files changed, 19 insertions(+), 14 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 46bdc16006a943..83ee65248e9cac 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -83,7 +83,7 @@ jobs: - run: git lfs install - run: pip install --upgrade pip - run: pip install .[sklearn,tf-cpu,torch,testing,sentencepiece,torch-speech,vision] - - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html + - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.12.0+cpu.html - run: pip install tensorflow_probability - run: pip install https://github.com/kpu/kenlm/archive/master.zip - run: pip install git+https://github.com/huggingface/accelerate @@ -124,7 +124,7 @@ jobs: - run: git lfs install - run: pip install --upgrade pip - run: pip install .[sklearn,tf-cpu,torch,testing,sentencepiece,torch-speech,vision] - - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html + - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.12.0+cpu.html - run: pip install tensorflow_probability - run: pip install https://github.com/kpu/kenlm/archive/master.zip - run: pip install git+https://github.com/huggingface/accelerate @@ -159,7 +159,7 @@ jobs: - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng - run: pip install --upgrade pip - run: pip install .[sklearn,flax,torch,testing,sentencepiece,torch-speech,vision] - - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html + - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.12.0+cpu.html - run: pip install https://github.com/kpu/kenlm/archive/master.zip - run: pip install git+https://github.com/huggingface/accelerate - save_cache: @@ -198,7 +198,7 @@ jobs: - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng - run: pip install --upgrade pip - run: pip install .[sklearn,flax,torch,testing,sentencepiece,torch-speech,vision] - - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html + - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.12.0+cpu.html - run: pip install https://github.com/kpu/kenlm/archive/master.zip - run: pip install git+https://github.com/huggingface/accelerate - save_cache: @@ -231,7 +231,7 @@ jobs: - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng time - run: pip install --upgrade pip - run: pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm] - - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html + - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.12.0+cpu.html - run: pip install https://github.com/kpu/kenlm/archive/master.zip - run: pip install git+https://github.com/huggingface/accelerate - save_cache: @@ -269,7 +269,7 @@ jobs: - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng - run: pip install --upgrade pip - run: pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm] - - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html + - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.12.0+cpu.html - run: pip install https://github.com/kpu/kenlm/archive/master.zip - run: pip install git+https://github.com/huggingface/accelerate - save_cache: @@ -439,7 +439,7 @@ jobs: - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng - run: pip install --upgrade pip - run: pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm] - - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html + - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.12.0+cpu.html - run: pip install https://github.com/kpu/kenlm/archive/master.zip - save_cache: key: v0.5-torch-{{ checksum "setup.py" }} @@ -477,7 +477,7 @@ jobs: - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng - run: pip install --upgrade pip - run: pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm] - - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.11.0+cpu.html + - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.12.0+cpu.html - run: pip install https://github.com/kpu/kenlm/archive/master.zip - save_cache: key: v0.5-torch-{{ checksum "setup.py" }} diff --git a/examples/pytorch/_tests_requirements.txt b/examples/pytorch/_tests_requirements.txt index 8c3c0c38434cba..979890f4b79c38 100644 --- a/examples/pytorch/_tests_requirements.txt +++ b/examples/pytorch/_tests_requirements.txt @@ -22,5 +22,4 @@ protobuf torchvision jiwer librosa -torch < 1.12 evaluate >= 0.2.0 diff --git a/setup.py b/setup.py index 52b7837a88456d..391de689ec4b84 100644 --- a/setup.py +++ b/setup.py @@ -162,7 +162,7 @@ "timeout-decorator", "timm", "tokenizers>=0.11.1,!=0.11.3,<0.13", - "torch>=1.0,<1.12", + "torch>=1.0,!=0.12.0", "torchaudio", "pyctcdecode>=0.3.0", "tqdm>=4.27", diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py index b0020ab8977867..bb98fcc024aa92 100644 --- a/src/transformers/dependency_versions_table.py +++ b/src/transformers/dependency_versions_table.py @@ -68,7 +68,7 @@ "timeout-decorator": "timeout-decorator", "timm": "timm", "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.13", - "torch": "torch>=1.0,<1.12", + "torch": "torch>=1.0,!=0.12.0", "torchaudio": "torchaudio", "pyctcdecode": "pyctcdecode>=0.3.0", "tqdm": "tqdm>=4.27", diff --git a/src/transformers/models/deberta/modeling_deberta.py b/src/transformers/models/deberta/modeling_deberta.py index 2d9e647c130cab..df3d4d95cd0170 100644 --- a/src/transformers/models/deberta/modeling_deberta.py +++ b/src/transformers/models/deberta/modeling_deberta.py @@ -187,6 +187,8 @@ def backward(ctx, grad_output): @staticmethod def symbolic(g: torch._C.Graph, input: torch._C.Value, local_ctx: Union[float, DropoutContext]) -> torch._C.Value: + from torch.onnx import symbolic_opset12 + dropout_p = local_ctx if isinstance(local_ctx, DropoutContext): dropout_p = local_ctx.dropout @@ -198,7 +200,7 @@ def symbolic(g: torch._C.Graph, input: torch._C.Value, local_ctx: Union[float, D # Once https://github.com/pytorch/pytorch/issues/78391 is fixed, do something like: # if opset_version < 12: # return torch.onnx.symbolic_opset9.dropout(g, input, dropout_p, train) - return torch.onnx.symbolic_opset12.dropout(g, input, dropout_p, train) + return symbolic_opset12.dropout(g, input, dropout_p, train) class StableDropout(nn.Module): diff --git a/src/transformers/models/deberta_v2/modeling_deberta_v2.py b/src/transformers/models/deberta_v2/modeling_deberta_v2.py index 738981648af956..a513a8280ed51d 100644 --- a/src/transformers/models/deberta_v2/modeling_deberta_v2.py +++ b/src/transformers/models/deberta_v2/modeling_deberta_v2.py @@ -193,6 +193,8 @@ def backward(ctx, grad_output): @staticmethod def symbolic(g: torch._C.Graph, input: torch._C.Value, local_ctx: Union[float, DropoutContext]) -> torch._C.Value: + from torch.onnx import symbolic_opset12 + dropout_p = local_ctx if isinstance(local_ctx, DropoutContext): dropout_p = local_ctx.dropout @@ -204,7 +206,7 @@ def symbolic(g: torch._C.Graph, input: torch._C.Value, local_ctx: Union[float, D # Once https://github.com/pytorch/pytorch/issues/78391 is fixed, do something like: # if opset_version < 12: # return torch.onnx.symbolic_opset9.dropout(g, input, dropout_p, train) - return torch.onnx.symbolic_opset12.dropout(g, input, dropout_p, train) + return symbolic_opset12.dropout(g, input, dropout_p, train) # Copied from transformers.models.deberta.modeling_deberta.StableDropout diff --git a/src/transformers/models/sew_d/modeling_sew_d.py b/src/transformers/models/sew_d/modeling_sew_d.py index e582705ab09424..a9a231aec1d8e6 100644 --- a/src/transformers/models/sew_d/modeling_sew_d.py +++ b/src/transformers/models/sew_d/modeling_sew_d.py @@ -597,6 +597,8 @@ def backward(ctx, grad_output): @staticmethod def symbolic(g: torch._C.Graph, input: torch._C.Value, local_ctx: Union[float, DropoutContext]) -> torch._C.Value: + from torch.onnx import symbolic_opset12 + dropout_p = local_ctx if isinstance(local_ctx, DropoutContext): dropout_p = local_ctx.dropout @@ -608,7 +610,7 @@ def symbolic(g: torch._C.Graph, input: torch._C.Value, local_ctx: Union[float, D # Once https://github.com/pytorch/pytorch/issues/78391 is fixed, do something like: # if opset_version < 12: # return torch.onnx.symbolic_opset9.dropout(g, input, dropout_p, train) - return torch.onnx.symbolic_opset12.dropout(g, input, dropout_p, train) + return symbolic_opset12.dropout(g, input, dropout_p, train) # Copied from transformers.models.deberta.modeling_deberta.StableDropout From aff5117f4695666f1fe9400f159e2a137f806544 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger Date: Mon, 8 Aug 2022 09:54:10 -0400 Subject: [PATCH 038/539] Remove debug statement --- src/transformers/tokenization_utils_base.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/transformers/tokenization_utils_base.py b/src/transformers/tokenization_utils_base.py index 91537ef46cc864..f85dc73cb659cb 100644 --- a/src/transformers/tokenization_utils_base.py +++ b/src/transformers/tokenization_utils_base.py @@ -1713,7 +1713,6 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], resolved_vocab_files = {} unresolved_files = [] for file_id, file_path in vocab_files.items(): - print(file_id, file_path) if file_path is None: resolved_vocab_files[file_id] = None else: From 7495924007f9fcdc58f1014c2d3b369898d803b9 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Mon, 8 Aug 2022 07:22:17 -0700 Subject: [PATCH 039/539] Specify en in doc-builder README example (#18526) Co-authored-by: Ankur Goyal --- docs/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/README.md b/docs/README.md index c8399a1ee60255..964a8b8b27a3a4 100644 --- a/docs/README.md +++ b/docs/README.md @@ -43,7 +43,7 @@ Once you have setup the `doc-builder` and additional packages, you can generate typing the following command: ```bash -doc-builder build transformers docs/source/ --build_dir ~/tmp/test-build +doc-builder build transformers docs/source/en/ --build_dir ~/tmp/test-build ``` You can adapt the `--build_dir` to set any temporary folder that you prefer. This command will create it and generate From 47e1676255e5dd86b9541f734cd4f4bdcbb50f4a Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Mon, 8 Aug 2022 10:22:27 -0400 Subject: [PATCH 040/539] New cache fixes: add safeguard before looking in folders (#18522) --- src/transformers/utils/hub.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py index 5f9421912c49d7..570ff52e707532 100644 --- a/src/transformers/utils/hub.py +++ b/src/transformers/utils/hub.py @@ -133,6 +133,8 @@ def get_cached_models(cache_dir: Union[str, Path] = None) -> List[Tuple]: cache_dir = TRANSFORMERS_CACHE elif isinstance(cache_dir, Path): cache_dir = str(cache_dir) + if not os.path.isdir(cache_dir): + return [] cached_models = [] for file in os.listdir(cache_dir): @@ -210,6 +212,9 @@ def try_to_load_from_cache(cache_dir, repo_id, filename, revision=None): if not os.path.isdir(model_cache): # No cache for this model return None + for subfolder in ["refs", "snapshots"]: + if not os.path.isdir(os.path.join(model_cache, subfolder)): + return None # Resolve refs (for instance to convert main to the associated commit sha) cached_refs = os.listdir(os.path.join(model_cache, "refs")) @@ -873,6 +878,8 @@ def get_all_cached_files(cache_dir=None): cache_dir = TRANSFORMERS_CACHE else: cache_dir = str(cache_dir) + if not os.path.isdir(cache_dir): + return [] cached_files = [] for file in os.listdir(cache_dir): From ec8d26248f708fabc9128a9746d27b576820aecd Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 8 Aug 2022 17:44:10 +0200 Subject: [PATCH 041/539] unpin resampy (#18527) Co-authored-by: ydshieh --- setup.py | 3 +-- src/transformers/dependency_versions_table.py | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/setup.py b/setup.py index 391de689ec4b84..05ec2c7617fd98 100644 --- a/setup.py +++ b/setup.py @@ -144,7 +144,6 @@ "ray[tune]", "regex!=2019.12.17", "requests", - "resampy<0.3.1", "rjieba", "rouge-score", "sacrebleu>=1.4.12,<2.0.0", @@ -270,7 +269,7 @@ def run(self): extras["integrations"] = extras["optuna"] + extras["ray"] + extras["sigopt"] extras["serving"] = deps_list("pydantic", "uvicorn", "fastapi", "starlette") -extras["audio"] = deps_list("librosa", "pyctcdecode", "phonemizer", "resampy") # resampy can be removed once unpinned. +extras["audio"] = deps_list("librosa", "pyctcdecode", "phonemizer") # `pip install ".[speech]"` is deprecated and `pip install ".[torch-speech]"` should be used instead extras["speech"] = deps_list("torchaudio") + extras["audio"] extras["torch-speech"] = deps_list("torchaudio") + extras["audio"] diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py index bb98fcc024aa92..be3dba684bd58d 100644 --- a/src/transformers/dependency_versions_table.py +++ b/src/transformers/dependency_versions_table.py @@ -50,7 +50,6 @@ "ray[tune]": "ray[tune]", "regex": "regex!=2019.12.17", "requests": "requests", - "resampy": "resampy<0.3.1", "rjieba": "rjieba", "rouge-score": "rouge-score", "sacrebleu": "sacrebleu>=1.4.12,<2.0.0", From 36b37990aff227132af936a97ab2d010591e8dca Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Mon, 8 Aug 2022 08:53:52 -0700 Subject: [PATCH 042/539] =?UTF-8?q?=20=E2=9C=A8=20update=20to=20use=20inte?= =?UTF-8?q?rlibrary=20links=20instead=20of=20Markdown=20(#18500)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/source/en/accelerate.mdx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/source/en/accelerate.mdx b/docs/source/en/accelerate.mdx index c215758d47b6a3..02e05df3907492 100644 --- a/docs/source/en/accelerate.mdx +++ b/docs/source/en/accelerate.mdx @@ -22,7 +22,7 @@ Get started by installing 🤗 Accelerate: pip install accelerate ``` -Then import and create an [`Accelerator`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator) object. `Accelerator` will automatically detect your type of distributed setup and initialize all the necessary components for training. You don't need to explicitly place your model on a device. +Then import and create an [`~accelerate.Accelerator`] object. The [`~accelerate.Accelerator`] will automatically detect your type of distributed setup and initialize all the necessary components for training. You don't need to explicitly place your model on a device. ```py >>> from accelerate import Accelerator @@ -32,7 +32,7 @@ Then import and create an [`Accelerator`](https://huggingface.co/docs/accelerate ## Prepare to accelerate -The next step is to pass all the relevant training objects to the [`prepare`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.prepare) method. This includes your training and evaluation DataLoaders, a model and an optimizer: +The next step is to pass all the relevant training objects to the [`~accelerate.Accelerator.prepare`] method. This includes your training and evaluation DataLoaders, a model and an optimizer: ```py >>> train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( @@ -42,7 +42,7 @@ The next step is to pass all the relevant training objects to the [`prepare`](ht ## Backward -The last addition is to replace the typical `loss.backward()` in your training loop with 🤗 Accelerate's [`backward`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.backward) method: +The last addition is to replace the typical `loss.backward()` in your training loop with 🤗 Accelerate's [`~accelerate.Accelerator.backward`]method: ```py >>> for epoch in range(num_epochs): @@ -121,7 +121,7 @@ accelerate launch train.py ### Train with a notebook -🤗 Accelerate can also run in a notebook if you're planning on using Colaboratory's TPUs. Wrap all the code responsible for training in a function, and pass it to `notebook_launcher`: +🤗 Accelerate can also run in a notebook if you're planning on using Colaboratory's TPUs. Wrap all the code responsible for training in a function, and pass it to [`~accelerate.notebook_launcher`]: ```py >>> from accelerate import notebook_launcher From 3632531ec60beb03fd3b4f0d30f69853d8bcd5b4 Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Mon, 8 Aug 2022 09:31:31 -0700 Subject: [PATCH 043/539] Add example of multimodal usage to pipeline tutorial (#18498) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * 📝 add example of multimodal usage to pipeline tutorial * 🖍 apply feedbacks * 🖍 apply niels feedback --- docs/source/en/pipeline_tutorial.mdx | 39 ++++++++++++++++++++++------ 1 file changed, 31 insertions(+), 8 deletions(-) diff --git a/docs/source/en/pipeline_tutorial.mdx b/docs/source/en/pipeline_tutorial.mdx index 7929113209748d..95585b64359f49 100644 --- a/docs/source/en/pipeline_tutorial.mdx +++ b/docs/source/en/pipeline_tutorial.mdx @@ -12,21 +12,21 @@ specific language governing permissions and limitations under the License. # Pipelines for inference -The [`pipeline`] makes it simple to use any model from the [Model Hub](https://huggingface.co/models) for inference on a variety of tasks such as text generation, image segmentation and audio classification. Even if you don't have experience with a specific modality or understand the code powering the models, you can still use them with the [`pipeline`]! This tutorial will teach you to: +The [`pipeline`] makes it simple to use any model from the [Hub](https://huggingface.co/models) for inference on any language, computer vision, speech, and multimodal tasks. Even if you don't have experience with a specific modality or aren't familiar with the underlying code behind the models, you can still use them for inference with the [`pipeline`]! This tutorial will teach you to: * Use a [`pipeline`] for inference. * Use a specific tokenizer or model. -* Use a [`pipeline`] for audio and vision tasks. +* Use a [`pipeline`] for audio, vision, and multimodal tasks. -Take a look at the [`pipeline`] documentation for a complete list of supported tasks. +Take a look at the [`pipeline`] documentation for a complete list of supported tasks and available parameters. ## Pipeline usage -While each task has an associated [`pipeline`], it is simpler to use the general [`pipeline`] abstraction which contains all the specific task pipelines. The [`pipeline`] automatically loads a default model and tokenizer capable of inference for your task. +While each task has an associated [`pipeline`], it is simpler to use the general [`pipeline`] abstraction which contains all the task-specific pipelines. The [`pipeline`] automatically loads a default model and a preprocessing class capable of inference for your task. 1. Start by creating a [`pipeline`] and specify an inference task: @@ -67,7 +67,7 @@ Any additional parameters for your task can also be included in the [`pipeline`] ### Choose a model and tokenizer -The [`pipeline`] accepts any model from the [Model Hub](https://huggingface.co/models). There are tags on the Model Hub that allow you to filter for a model you'd like to use for your task. Once you've picked an appropriate model, load it with the corresponding `AutoModelFor` and [`AutoTokenizer`] class. For example, load the [`AutoModelForCausalLM`] class for a causal language modeling task: +The [`pipeline`] accepts any model from the [Hub](https://huggingface.co/models). There are tags on the Hub that allow you to filter for a model you'd like to use for your task. Once you've picked an appropriate model, load it with the corresponding `AutoModelFor` and [`AutoTokenizer`] class. For example, load the [`AutoModelForCausalLM`] class for a causal language modeling task: ```py >>> from transformers import AutoTokenizer, AutoModelForCausalLM @@ -95,7 +95,7 @@ Pass your input text to the [`pipeline`] to generate some text: ## Audio pipeline -The flexibility of the [`pipeline`] means it can also be extended to audio tasks. +The [`pipeline`] also supports audio tasks like audio classification and automatic speech recognition. For example, let's classify the emotion in this audio clip: @@ -129,9 +129,9 @@ Pass the audio file to the [`pipeline`]: ## Vision pipeline -Finally, using a [`pipeline`] for vision tasks is practically identical. +Using a [`pipeline`] for vision tasks is practically identical. -Specify your vision task and pass your image to the classifier. The imaage can be a link or a local path to the image. For example, what species of cat is shown below? +Specify your task and pass your image to the classifier. The image can be a link or a local path to the image. For example, what species of cat is shown below? ![pipeline-cat-chonk](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg) @@ -146,3 +146,26 @@ Specify your vision task and pass your image to the classifier. The imaage can b >>> preds [{'score': 0.4335, 'label': 'lynx, catamount'}, {'score': 0.0348, 'label': 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor'}, {'score': 0.0324, 'label': 'snow leopard, ounce, Panthera uncia'}, {'score': 0.0239, 'label': 'Egyptian cat'}, {'score': 0.0229, 'label': 'tiger cat'}] ``` + +## Multimodal pipeline + +The [`pipeline`] supports more than one modality. For example, a visual question answering (VQA) task combines text and image. Feel free to use any image link you like and a question you want to ask about the image. The image can be a URL or a local path to the image. + +For example, if you use the same image from the vision pipeline above: + +```py +>>> image = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" +>>> question = "Where is the cat?" +``` + +Create a pipeline for `vqa` and pass it the image and question: + +```py +>>> from transformers import pipeline + +>>> vqa = pipeline(task="vqa") +>>> preds = vqa(image=image, question=question) +>>> preds = [{"score": round(pred["score"], 4), "answer": pred["answer"]} for pred in preds] +>>> preds +[{'score': 0.9112, 'answer': 'snow'}, {'score': 0.8796, 'answer': 'in snow'}, {'score': 0.6717, 'answer': 'outside'}, {'score': 0.0291, 'answer': 'on ground'}, {'score': 0.027, 'answer': 'ground'}] +``` \ No newline at end of file From 82bb682643ceae1f03e6460ff4dfcb310c183570 Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Mon, 8 Aug 2022 19:28:51 +0200 Subject: [PATCH 044/539] [VideoMAE] Add model to doc tests (#18523) * Add videomae to doc tests * Add pip install decord Co-authored-by: Niels Rogge --- docker/transformers-all-latest-gpu/Dockerfile | 2 ++ utils/documentation_tests.txt | 1 + 2 files changed, 3 insertions(+) diff --git a/docker/transformers-all-latest-gpu/Dockerfile b/docker/transformers-all-latest-gpu/Dockerfile index d82c9f7c777c7e..e97a91f4246fb4 100644 --- a/docker/transformers-all-latest-gpu/Dockerfile +++ b/docker/transformers-all-latest-gpu/Dockerfile @@ -45,6 +45,8 @@ RUN python3 -m pip install -U "itsdangerous<2.1.0" RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate +RUN python3 -m pip install --no-cache-dir decord + # When installing in editable mode, `transformers` is not recognized as a package. # this line must be added in order for python to be aware of transformers. RUN cd transformers && python3 setup.py develop diff --git a/utils/documentation_tests.txt b/utils/documentation_tests.txt index d523181eae2bde..1941a7343a6bc9 100644 --- a/utils/documentation_tests.txt +++ b/utils/documentation_tests.txt @@ -70,6 +70,7 @@ src/transformers/models/trocr/modeling_trocr.py src/transformers/models/unispeech/modeling_unispeech.py src/transformers/models/unispeech_sat/modeling_unispeech_sat.py src/transformers/models/van/modeling_van.py +src/transformers/models/videomae/modeling_videomae.py src/transformers/models/vilt/modeling_vilt.py src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py src/transformers/models/vit/modeling_vit.py From f1f5de31ed4372d4e33b6ae01b9e18f10d29de9b Mon Sep 17 00:00:00 2001 From: Mishig Davaadorj Date: Mon, 8 Aug 2022 20:33:34 +0200 Subject: [PATCH 045/539] Update perf_train_gpu_one.mdx (#18532) --- docs/source/en/perf_train_gpu_one.mdx | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/source/en/perf_train_gpu_one.mdx b/docs/source/en/perf_train_gpu_one.mdx index ba5bcb456d2220..56cd6c6f10e333 100644 --- a/docs/source/en/perf_train_gpu_one.mdx +++ b/docs/source/en/perf_train_gpu_one.mdx @@ -719,13 +719,16 @@ For some applications, such as pretraining large language models, applying all t Another use case for training on many GPUs is if the model does not fit on a single GPU with all the mentioned tricks. There are still more methods we can apply although life starts to get a bit more complicated. This usually involves some form of pipeline or tensor parallelism where the model itself is distributed across several GPUs. One can also make use of DeepSpeed which implements some of these parallelism strategies along with some more optimization to reduce the memory footprint such as partitioning the optimizer states. You can read more about this in the ["Multi-GPU training" section](perf_train_gpu_many). ## Inference with torchdynamo + TorchDynamo is a new tracer that uses Python’s frame evaluation API to automatically create FX traces from existing PyTorch programs. After capturing the FX graph, different backends can be deployed to lower the graph to an optimized engine. One solution is using the [TensorRT](https://developer.nvidia.com/tensorrt) or NVFuser as backend. You can choose one option below for performance boost. + ``` TrainingArguments(torchdynamo="eager") #enable eager model GPU. No performance boost TrainingArguments(torchdynamo="nvfuser") #enable nvfuser TrainingArguments(torchdynamo="fx2trt") #enable tensorRT fp32 TrainingArguments(torchdynamo="fx2trt-f16") #enable tensorRT fp16 ``` + This feature involves 3 different libraries. To install them, please follow the instructions below: - [Torchdynamo installation](https://github.com/pytorch/torchdynamo#requirements-and-setup) - [Functorch installation](https://github.com/pytorch/functorch#install) From a765b68aa6f1509399776b6cdd3b329484ced1d4 Mon Sep 17 00:00:00 2001 From: Rasmus Arpe Fogh Jensen Date: Mon, 8 Aug 2022 21:52:47 +0200 Subject: [PATCH 046/539] Update no_trainer.py scripts to include accelerate gradient accumulation wrapper (#18473) * Added accelerate gradient accumulation wrapper to run_image_classification_no_trainer.py example script * make fixup changes * PR comments * changed input to Acceletor based on PR comment, ran make fixup * Added comment explaining the sync_gradients statement * Fixed lr scheduler max steps * Changed run_clm_no_trainer.py script to use accelerate gradient accum wrapper * Fixed all scripts except wav2vec2 pretraining to use accelerate gradient accum wrapper * Added accelerate gradient accum wrapper for wav2vec2_pretraining_no_trainer.py script * make fixup and lr_scheduler step inserted back into run_qa_beam_search_no_trainer.py * removed changes to run_wav2vec2_pretraining_no_trainer.py script and fixed using wrong constant in qa_beam_search_no_trainer.py script --- .../run_image_classification_no_trainer.py | 34 ++++++++++------- .../language-modeling/run_clm_no_trainer.py | 34 ++++++++++------- .../language-modeling/run_mlm_no_trainer.py | 34 ++++++++++------- .../multiple-choice/run_swag_no_trainer.py | 36 +++++++++++------- .../run_qa_beam_search_no_trainer.py | 37 +++++++++++++------ .../question-answering/run_qa_no_trainer.py | 35 +++++++++++------- .../run_semantic_segmentation_no_trainer.py | 34 ++++++++++------- .../run_summarization_no_trainer.py | 33 ++++++++++------- 8 files changed, 173 insertions(+), 104 deletions(-) diff --git a/examples/pytorch/image-classification/run_image_classification_no_trainer.py b/examples/pytorch/image-classification/run_image_classification_no_trainer.py index f10a54add79158..1bd190d1303e9a 100644 --- a/examples/pytorch/image-classification/run_image_classification_no_trainer.py +++ b/examples/pytorch/image-classification/run_image_classification_no_trainer.py @@ -212,9 +212,14 @@ def main(): # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers # in the environment - accelerator = ( - Accelerator(log_with=args.report_to, logging_dir=args.output_dir) if args.with_tracking else Accelerator() - ) + accelerator_log_kwargs = {} + + if args.with_tracking: + accelerator_log_kwargs["log_with"] = args.report_to + accelerator_log_kwargs["logging_dir"] = args.output_dir + + accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs) + logger.info(accelerator.state) # Make one log on every process with the configuration for debugging. logging.basicConfig( @@ -384,8 +389,8 @@ def collate_fn(examples): lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, - num_warmup_steps=args.num_warmup_steps, - num_training_steps=args.max_train_steps, + num_warmup_steps=args.num_warmup_steps * args.gradient_accumulation_steps, + num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, ) # Prepare everything with our `accelerator`. @@ -467,17 +472,20 @@ def collate_fn(examples): if resume_step is not None and step < resume_step: completed_steps += 1 continue - outputs = model(**batch) - loss = outputs.loss - # We keep track of the loss at each epoch - if args.with_tracking: - total_loss += loss.detach().float() - loss = loss / args.gradient_accumulation_steps - accelerator.backward(loss) - if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: + + with accelerator.accumulate(model): + outputs = model(**batch) + loss = outputs.loss + # We keep track of the loss at each epoch + if args.with_tracking: + total_loss += loss.detach().float() + accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() + + # Checks if the accelerator has performed an optimization step behind the scenes + if accelerator.sync_gradients: progress_bar.update(1) completed_steps += 1 diff --git a/examples/pytorch/language-modeling/run_clm_no_trainer.py b/examples/pytorch/language-modeling/run_clm_no_trainer.py index 21dc568fd44822..3fd67d5fbf66e4 100755 --- a/examples/pytorch/language-modeling/run_clm_no_trainer.py +++ b/examples/pytorch/language-modeling/run_clm_no_trainer.py @@ -249,9 +249,14 @@ def main(): # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers # in the environment - accelerator = ( - Accelerator(log_with=args.report_to, logging_dir=args.output_dir) if args.with_tracking else Accelerator() - ) + accelerator_log_kwargs = {} + + if args.with_tracking: + accelerator_log_kwargs["log_with"] = args.report_to + accelerator_log_kwargs["logging_dir"] = args.output_dir + + accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs) + # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", @@ -486,8 +491,8 @@ def group_texts(examples): lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, - num_warmup_steps=args.num_warmup_steps, - num_training_steps=args.max_train_steps, + num_warmup_steps=args.num_warmup_steps * args.gradient_accumulation_steps, + num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, ) # Prepare everything with our `accelerator`. @@ -567,17 +572,20 @@ def group_texts(examples): if resume_step is not None and step < resume_step: completed_steps += 1 continue - outputs = model(**batch) - loss = outputs.loss - # We keep track of the loss at each epoch - if args.with_tracking: - total_loss += loss.detach().float() - loss = loss / args.gradient_accumulation_steps - accelerator.backward(loss) - if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: + + with accelerator.accumulate(model): + outputs = model(**batch) + loss = outputs.loss + # We keep track of the loss at each epoch + if args.with_tracking: + total_loss += loss.detach().float() + accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() + + # Checks if the accelerator has performed an optimization step behind the scenes + if accelerator.sync_gradients: progress_bar.update(1) completed_steps += 1 diff --git a/examples/pytorch/language-modeling/run_mlm_no_trainer.py b/examples/pytorch/language-modeling/run_mlm_no_trainer.py index b7b085e5b61bea..80dfcf9a9194e5 100755 --- a/examples/pytorch/language-modeling/run_mlm_no_trainer.py +++ b/examples/pytorch/language-modeling/run_mlm_no_trainer.py @@ -258,9 +258,14 @@ def main(): # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers # in the environment - accelerator = ( - Accelerator(log_with=args.report_to, logging_dir=args.output_dir) if args.with_tracking else Accelerator() - ) + accelerator_log_kwargs = {} + + if args.with_tracking: + accelerator_log_kwargs["log_with"] = args.report_to + accelerator_log_kwargs["logging_dir"] = args.output_dir + + accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs) + # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", @@ -530,8 +535,8 @@ def group_texts(examples): lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, - num_warmup_steps=args.num_warmup_steps, - num_training_steps=args.max_train_steps, + num_warmup_steps=args.num_warmup_steps * args.gradient_accumulation_steps, + num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, ) # Prepare everything with our `accelerator`. @@ -611,17 +616,20 @@ def group_texts(examples): if resume_step is not None and step < resume_step: completed_steps += 1 continue - outputs = model(**batch) - loss = outputs.loss - # We keep track of the loss at each epoch - if args.with_tracking: - total_loss += loss.detach().float() - loss = loss / args.gradient_accumulation_steps - accelerator.backward(loss) - if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: + + with accelerator.accumulate(model): + outputs = model(**batch) + loss = outputs.loss + # We keep track of the loss at each epoch + if args.with_tracking: + total_loss += loss.detach().float() + accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() + + # Checks if the accelerator has performed an optimization step behind the scenes + if accelerator.sync_gradients: progress_bar.update(1) completed_steps += 1 diff --git a/examples/pytorch/multiple-choice/run_swag_no_trainer.py b/examples/pytorch/multiple-choice/run_swag_no_trainer.py index a3868434b28504..eeb04e417fdfd6 100755 --- a/examples/pytorch/multiple-choice/run_swag_no_trainer.py +++ b/examples/pytorch/multiple-choice/run_swag_no_trainer.py @@ -65,7 +65,7 @@ def parse_args(): - parser = argparse.ArgumentParser(description="Finetune a transformers model on a text classification task") + parser = argparse.ArgumentParser(description="Finetune a transformers model on a multiple choice task") parser.add_argument( "--dataset_name", type=str, @@ -284,9 +284,14 @@ def main(): # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers # in the environment - accelerator = ( - Accelerator(log_with=args.report_to, logging_dir=args.output_dir) if args.with_tracking else Accelerator() - ) + accelerator_log_kwargs = {} + + if args.with_tracking: + accelerator_log_kwargs["log_with"] = args.report_to + accelerator_log_kwargs["logging_dir"] = args.output_dir + + accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs) + # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", @@ -483,8 +488,8 @@ def preprocess_function(examples): lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, - num_warmup_steps=args.num_warmup_steps, - num_training_steps=args.max_train_steps, + num_warmup_steps=args.num_warmup_steps * args.gradient_accumulation_steps, + num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, ) # Prepare everything with our `accelerator`. @@ -567,17 +572,20 @@ def preprocess_function(examples): if resume_step is not None and step < resume_step: completed_steps += 1 continue - outputs = model(**batch) - loss = outputs.loss - # We keep track of the loss at each epoch - if args.with_tracking: - total_loss += loss.detach().float() - loss = loss / args.gradient_accumulation_steps - accelerator.backward(loss) - if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: + + with accelerator.accumulate(model): + outputs = model(**batch) + loss = outputs.loss + # We keep track of the loss at each epoch + if args.with_tracking: + total_loss += loss.detach().float() + accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() + + # Checks if the accelerator has performed an optimization step behind the scenes + if accelerator.sync_gradients: progress_bar.update(1) completed_steps += 1 diff --git a/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py b/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py index 69ddf24ab5aa49..370dd3f43d9545 100644 --- a/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py +++ b/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py @@ -297,8 +297,16 @@ def main(): send_example_telemetry("run_qa_beam_search_no_trainer", args) # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. - # If we're using tracking, we also need to initialize it here and it will pick up all supported trackers in the environment - accelerator = Accelerator(log_with="all", logging_dir=args.output_dir) if args.with_tracking else Accelerator() + # If we're using tracking, we also need to initialize it here and it will pick up all supported trackers + # in the environment + accelerator_log_kwargs = {} + + if args.with_tracking: + accelerator_log_kwargs["log_with"] = args.report_to + accelerator_log_kwargs["logging_dir"] = args.output_dir + + accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs) + # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", @@ -739,8 +747,8 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len): lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, - num_warmup_steps=args.num_warmup_steps, - num_training_steps=args.max_train_steps, + num_warmup_steps=args.num_warmup_steps * args.gradient_accumulation_steps, + num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, ) # Prepare everything with our `accelerator`. @@ -818,17 +826,22 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len): if resume_step is not None and step < resume_step: completed_steps += 1 continue - outputs = model(**batch) - loss = outputs.loss - # We keep track of the loss at each epoch - if args.with_tracking: - total_loss += loss.detach().float() - loss = loss / args.gradient_accumulation_steps - accelerator.backward(loss) - if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: + + with accelerator.accumulate(model): + outputs = model(**batch) + loss = outputs.loss + # We keep track of the loss at each epoch + if args.with_tracking: + total_loss += loss.detach().float() + + accelerator.backward(loss) + optimizer.step() lr_scheduler.step() optimizer.zero_grad() + + # Checks if the accelerator has performed an optimization step behind the scenes + if accelerator.sync_gradients: progress_bar.update(1) completed_steps += 1 diff --git a/examples/pytorch/question-answering/run_qa_no_trainer.py b/examples/pytorch/question-answering/run_qa_no_trainer.py index d98dca22bf2e48..6bf4eb28e99418 100755 --- a/examples/pytorch/question-answering/run_qa_no_trainer.py +++ b/examples/pytorch/question-answering/run_qa_no_trainer.py @@ -337,9 +337,14 @@ def main(): # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers # in the environment - accelerator = ( - Accelerator(log_with=args.report_to, logging_dir=args.output_dir) if args.with_tracking else Accelerator() - ) + accelerator_log_kwargs = {} + + if args.with_tracking: + accelerator_log_kwargs["log_with"] = args.report_to + accelerator_log_kwargs["logging_dir"] = args.output_dir + + accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs) + # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", @@ -757,8 +762,8 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len): lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, - num_warmup_steps=args.num_warmup_steps, - num_training_steps=args.max_train_steps, + num_warmup_steps=args.num_warmup_steps * args.gradient_accumulation_steps, + num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, ) # Prepare everything with our `accelerator`. @@ -839,17 +844,21 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len): if resume_step is not None and step < resume_step: completed_steps += 1 continue - outputs = model(**batch) - loss = outputs.loss - # We keep track of the loss at each epoch - if args.with_tracking: - total_loss += loss.detach().float() - loss = loss / args.gradient_accumulation_steps - accelerator.backward(loss) - if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: + + with accelerator.accumulate(model): + outputs = model(**batch) + loss = outputs.loss + # We keep track of the loss at each epoch + if args.with_tracking: + total_loss += loss.detach().float() + + accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() + + # Checks if the accelerator has performed an optimization step behind the scenes + if accelerator.sync_gradients: progress_bar.update(1) completed_steps += 1 diff --git a/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py b/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py index 7ffb876d4db58f..30cb7cc53ae318 100644 --- a/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py +++ b/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py @@ -326,9 +326,14 @@ def main(): # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers # in the environment - accelerator = ( - Accelerator(log_with=args.report_to, logging_dir=args.output_dir) if args.with_tracking else Accelerator() - ) + accelerator_log_kwargs = {} + + if args.with_tracking: + accelerator_log_kwargs["log_with"] = args.report_to + accelerator_log_kwargs["logging_dir"] = args.output_dir + + accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs) + logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() @@ -487,8 +492,8 @@ def preprocess_val(example_batch): lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, - num_warmup_steps=args.num_warmup_steps, - num_training_steps=args.max_train_steps, + num_warmup_steps=args.num_warmup_steps * args.gradient_accumulation_steps, + num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, ) # Prepare everything with our `accelerator`. @@ -563,17 +568,20 @@ def preprocess_val(example_batch): if resume_step is not None and step < resume_step: completed_steps += 1 continue - outputs = model(**batch) - loss = outputs.loss - # We keep track of the loss at each epoch - if args.with_tracking: - total_loss += loss.detach().float() - loss = loss / args.gradient_accumulation_steps - accelerator.backward(loss) - if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: + + with accelerator.accumulate(model): + outputs = model(**batch) + loss = outputs.loss + # We keep track of the loss at each epoch + if args.with_tracking: + total_loss += loss.detach().float() + accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() + + # Checks if the accelerator has performed an optimization step behind the scenes + if accelerator.sync_gradients: progress_bar.update(1) completed_steps += 1 diff --git a/examples/pytorch/summarization/run_summarization_no_trainer.py b/examples/pytorch/summarization/run_summarization_no_trainer.py index ca9ef6ba9fa241..96781b6dcadbdd 100644 --- a/examples/pytorch/summarization/run_summarization_no_trainer.py +++ b/examples/pytorch/summarization/run_summarization_no_trainer.py @@ -330,9 +330,13 @@ def main(): # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers # in the environment - accelerator = ( - Accelerator(log_with=args.report_to, logging_dir=args.output_dir) if args.with_tracking else Accelerator() - ) + accelerator_log_kwargs = {} + + if args.with_tracking: + accelerator_log_kwargs["log_with"] = args.report_to + accelerator_log_kwargs["logging_dir"] = args.output_dir + + accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs) if args.source_prefix is None and args.model_name_or_path in [ "t5-small", "t5-base", @@ -552,8 +556,8 @@ def postprocess_text(preds, labels): lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, - num_warmup_steps=args.num_warmup_steps, - num_training_steps=args.max_train_steps, + num_warmup_steps=args.num_warmup_steps * args.gradient_accumulation_steps, + num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, ) # Prepare everything with our `accelerator`. @@ -635,17 +639,20 @@ def postprocess_text(preds, labels): if resume_step is not None and step < resume_step: completed_steps += 1 continue - outputs = model(**batch) - loss = outputs.loss - # We keep track of the loss at each epoch - if args.with_tracking: - total_loss += loss.detach().float() - loss = loss / args.gradient_accumulation_steps - accelerator.backward(loss) - if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: + + with accelerator.accumulate(model): + outputs = model(**batch) + loss = outputs.loss + # We keep track of the loss at each epoch + if args.with_tracking: + total_loss += loss.detach().float() + accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() + + # Checks if the accelerator has performed an optimization step behind the scenes + if accelerator.sync_gradients: progress_bar.update(1) completed_steps += 1 From ed70f24291f110ebefb72a15582e04f2f2958280 Mon Sep 17 00:00:00 2001 From: Ian Castillo <7807897+donelianc@users.noreply.github.com> Date: Mon, 8 Aug 2022 21:53:43 +0200 Subject: [PATCH 047/539] Add Spanish translation of converting_tensorflow_models.mdx (#18512) * Add file in spanish docs to be translated * Finish translation to Spanish * Improve Spanish wording * Add suggested changes from review --- docs/source/es/_toctree.yml | 2 + .../es/converting_tensorflow_models.mdx | 149 ++++++++++++++++++ 2 files changed, 151 insertions(+) create mode 100644 docs/source/es/converting_tensorflow_models.mdx diff --git a/docs/source/es/_toctree.yml b/docs/source/es/_toctree.yml index 4145a9649139cc..bd5f917aea4ccb 100644 --- a/docs/source/es/_toctree.yml +++ b/docs/source/es/_toctree.yml @@ -39,6 +39,8 @@ title: Ejecutar el entrenamiento en Amazon SageMaker - local: multilingual title: Modelos multilingües para inferencia + - local: converting_tensorflow_models + title: Convertir checkpoints de TensorFlow title: Guías prácticas - sections: - local: philosophy diff --git a/docs/source/es/converting_tensorflow_models.mdx b/docs/source/es/converting_tensorflow_models.mdx new file mode 100644 index 00000000000000..2ab15e81b2508a --- /dev/null +++ b/docs/source/es/converting_tensorflow_models.mdx @@ -0,0 +1,149 @@ + + +# Convertir checkpoints de Tensorflow + +Te proporcionamos una interfaz de línea de comando (`CLI`, por sus siglas en inglés) para convertir puntos de control (_checkpoints_) originales de Bert/GPT/GPT-2/Transformer-XL/XLNet/XLM en modelos que se puedan cargar utilizando los métodos `from_pretrained` de la biblioteca. + + + +Desde 2.3.0, el script para convertir es parte de la CLI de transformers (**transformers-cli**) disponible en cualquier instalación de transformers >= 2.3.0. + +La siguiente documentación refleja el formato para el comando **transformers-cli convert**. + + + +## BERT + +Puedes convertir cualquier checkpoint de TensorFlow para BERT (en particular, [los modelos pre-entrenados y publicados por Google](https://github.com/google-research/bert#pre-trained-models)) en un archivo de PyTorch mediante el script [convert_bert_original_tf_checkpoint_to_pytorch.py](https://github.com/huggingface/transformers/tree/main/src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py). + +Esta CLI toma como entrada un checkpoint de TensorFlow (tres archivos que comienzan con `bert_model.ckpt`) y el archivo de configuración asociado (`bert_config.json`), y crea un modelo PyTorch para esta configuración, carga los pesos del checkpoint de TensorFlow en el modelo de PyTorch y guarda el modelo resultante en un archivo estándar de PyTorch que se puede importar usando `from_pretrained()` (ve el ejemplo en [Tour rápido](quicktour), [run_glue.py](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification/run_glue.py)). + +Solo necesitas ejecutar este script **una vez** para convertir un modelo a PyTorch. Después, puedes ignorar el checkpoint de TensorFlow (los tres archivos que comienzan con `bert_model.ckpt`), pero asegúrate de conservar el archivo de configuración (`bert_config.json`) y el archivo de vocabulario (`vocab.txt`) ya que estos también son necesarios para el modelo en PyTorch. + +Para ejecutar este script deberás tener instalado TensorFlow y PyTorch (`pip install tensorflow`). El resto del repositorio solo requiere PyTorch. + +Aquí hay un ejemplo del proceso para convertir un modelo `BERT-Base Uncased` pre-entrenado: + +```bash +export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12 + +transformers-cli convert --model_type bert \ + --tf_checkpoint $BERT_BASE_DIR/bert_model.ckpt \ + --config $BERT_BASE_DIR/bert_config.json \ + --pytorch_dump_output $BERT_BASE_DIR/pytorch_model.bin +``` + +Puedes descargar los modelos pre-entrenados de Google para la conversión [aquí](https://github.com/google-research/bert#pre-trained-models). + +## ALBERT + +Convierte los checkpoints del modelo ALBERT de TensorFlow a PyTorch usando el script [convert_albert_original_tf_checkpoint_to_pytorch.py](https://github.com/huggingface/transformers/tree/main/src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py). + +La CLI toma como entrada un checkpoint de TensorFlow (tres archivos que comienzan con `model.ckpt-best`) y el archivo de configuración adjunto (`albert_config.json`), luego crea y guarda un modelo de PyTorch. Para ejecutar esta conversión deberás tener instalados TensorFlow y PyTorch. + +Aquí hay un ejemplo del proceso para convertir un modelo `ALBERT Base` pre-entrenado: + +```bash +export ALBERT_BASE_DIR=/path/to/albert/albert_base + +transformers-cli convert --model_type albert \ + --tf_checkpoint $ALBERT_BASE_DIR/model.ckpt-best \ + --config $ALBERT_BASE_DIR/albert_config.json \ + --pytorch_dump_output $ALBERT_BASE_DIR/pytorch_model.bin +``` + +Puedes descargar los modelos pre-entrenados de Google para la conversión [aquí](https://github.com/google-research/albert#pre-trained-models). + +## OpenAI GPT + +Este es un ejemplo del proceso para convertir un modelo OpenAI GPT pre-entrenado, asumiendo que tu checkpoint de NumPy se guarda con el mismo formato que el modelo pre-entrenado de OpenAI (más información [aquí](https://github.com/openai/finetune-transformer-lm)): + +```bash +export OPENAI_GPT_CHECKPOINT_FOLDER_PATH=/path/to/openai/pretrained/numpy/weights + +transformers-cli convert --model_type gpt \ + --tf_checkpoint $OPENAI_GPT_CHECKPOINT_FOLDER_PATH \ + --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \ + [--config OPENAI_GPT_CONFIG] \ + [--finetuning_task_name OPENAI_GPT_FINETUNED_TASK] \ +``` + +## OpenAI GPT-2 + +Aquí hay un ejemplo del proceso para convertir un modelo OpenAI GPT-2 pre-entrenado (más información [aquí](https://github.com/openai/gpt-2)): + +```bash +export OPENAI_GPT2_CHECKPOINT_PATH=/path/to/gpt2/pretrained/weights + +transformers-cli convert --model_type gpt2 \ + --tf_checkpoint $OPENAI_GPT2_CHECKPOINT_PATH \ + --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \ + [--config OPENAI_GPT2_CONFIG] \ + [--finetuning_task_name OPENAI_GPT2_FINETUNED_TASK] +``` + +## Transformer-XL + +Aquí hay un ejemplo del proceso para convertir un modelo Transformer-XL pre-entrenado (más información [aquí](https://github.com/kimiyoung/transformer-xl/tree/master/tf#obtain-and-evaluate-pretrained-sota-models)): + +```bash +export TRANSFO_XL_CHECKPOINT_FOLDER_PATH=/path/to/transfo/xl/checkpoint + +transformers-cli convert --model_type transfo_xl \ + --tf_checkpoint $TRANSFO_XL_CHECKPOINT_FOLDER_PATH \ + --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \ + [--config TRANSFO_XL_CONFIG] \ + [--finetuning_task_name TRANSFO_XL_FINETUNED_TASK] +``` + +## XLNet + +Aquí hay un ejemplo del proceso para convertir un modelo XLNet pre-entrenado: + +```bash +export TRANSFO_XL_CHECKPOINT_PATH=/path/to/xlnet/checkpoint +export TRANSFO_XL_CONFIG_PATH=/path/to/xlnet/config + +transformers-cli convert --model_type xlnet \ + --tf_checkpoint $TRANSFO_XL_CHECKPOINT_PATH \ + --config $TRANSFO_XL_CONFIG_PATH \ + --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \ + [--finetuning_task_name XLNET_FINETUNED_TASK] \ +``` + +## XLM + +Aquí hay un ejemplo del proceso para convertir un modelo XLM pre-entrenado: + +```bash +export XLM_CHECKPOINT_PATH=/path/to/xlm/checkpoint + +transformers-cli convert --model_type xlm \ + --tf_checkpoint $XLM_CHECKPOINT_PATH \ + --pytorch_dump_output $PYTORCH_DUMP_OUTPUT + [--config XML_CONFIG] \ + [--finetuning_task_name XML_FINETUNED_TASK] +``` + +## T5 + +Aquí hay un ejemplo del proceso para convertir un modelo T5 pre-entrenado: + +```bash +export T5=/path/to/t5/uncased_L-12_H-768_A-12 + +transformers-cli convert --model_type t5 \ + --tf_checkpoint $T5/t5_model.ckpt \ + --config $T5/t5_config.json \ + --pytorch_dump_output $T5/pytorch_model.bin +``` From 499450ed758dc40b02a1af93c40315d8ead373be Mon Sep 17 00:00:00 2001 From: AguilaCudicio Date: Mon, 8 Aug 2022 16:54:11 -0300 Subject: [PATCH 048/539] Spanish translation of summarization.mdx (#15947) (#18477) * Add Spanish translation of summarization.mdx * Apply suggestions from code review Co-authored-by: Omar U. Espejel Co-authored-by: Omar U. Espejel --- docs/source/es/_toctree.yml | 4 +- docs/source/es/tasks/summarization.mdx | 222 +++++++++++++++++++++++++ 2 files changed, 225 insertions(+), 1 deletion(-) create mode 100644 docs/source/es/tasks/summarization.mdx diff --git a/docs/source/es/_toctree.yml b/docs/source/es/_toctree.yml index bd5f917aea4ccb..60566b9e6f9b47 100644 --- a/docs/source/es/_toctree.yml +++ b/docs/source/es/_toctree.yml @@ -30,6 +30,8 @@ - sections: - local: tasks/language_modeling title: Modelado de lenguaje + - local: tasks/summarization + title: Generación de resúmenes - local: tasks/image_classification title: Clasificación de imágenes title: Fine-tuning para tareas posteriores @@ -47,4 +49,4 @@ title: Filosofía - local: bertology title: BERTología - title: Guías conceptuales \ No newline at end of file + title: Guías conceptuales diff --git a/docs/source/es/tasks/summarization.mdx b/docs/source/es/tasks/summarization.mdx new file mode 100644 index 00000000000000..c09c4b0b833a13 --- /dev/null +++ b/docs/source/es/tasks/summarization.mdx @@ -0,0 +1,222 @@ + + +# Generación de resúmenes + + + +La generación de resúmenes (summarization, en inglés) crea una versión más corta de un documento o un artículo que resume toda su información importante. Junto con la traducción, es un ejemplo de una tarea que puede ser formulada como una tarea secuencia a secuencia. La generación de resúmenes puede ser: + +- Extractiva: Extrae la información más relevante de un documento. +- Abstractiva: Genera un texto nuevo que captura la información más importante. + +Esta guía te mostrará cómo puedes hacer fine-tuning del modelo [T5](https://huggingface.co/t5-small) sobre el subset de proyectos de ley del estado de California, dentro del dataset [BillSum](https://huggingface.co/datasets/billsum) para hacer generación de resúmenes abstractiva. + + + +Consulta la [página de la tarea](https://huggingface.co/tasks/summarization) de generación de resúmenes para obtener más información sobre sus modelos, datasets y métricas asociadas. + + + +## Carga el dataset BillSum + +Carga el dataset BillSum de la biblioteca 🤗 Datasets: + +```py +>>> from datasets import load_dataset + +>>> billsum = load_dataset("billsum", split="ca_test") +``` + +Divide el dataset en un set de train y un set de test: + +```py +>>> billsum = billsum.train_test_split(test_size=0.2) +``` + +A continuación, observa un ejemplo: + +```py +>>> billsum["train"][0] +{'summary': 'Existing law authorizes state agencies to enter into contracts for the acquisition of goods or services upon approval by the Department of General Services. Existing law sets forth various requirements and prohibitions for those contracts, including, but not limited to, a prohibition on entering into contracts for the acquisition of goods or services of $100,000 or more with a contractor that discriminates between spouses and domestic partners or same-sex and different-sex couples in the provision of benefits. Existing law provides that a contract entered into in violation of those requirements and prohibitions is void and authorizes the state or any person acting on behalf of the state to bring a civil action seeking a determination that a contract is in violation and therefore void. Under existing law, a willful violation of those requirements and prohibitions is a misdemeanor.\nThis bill would also prohibit a state agency from entering into contracts for the acquisition of goods or services of $100,000 or more with a contractor that discriminates between employees on the basis of gender identity in the provision of benefits, as specified. By expanding the scope of a crime, this bill would impose a state-mandated local program.\nThe California Constitution requires the state to reimburse local agencies and school districts for certain costs mandated by the state. Statutory provisions establish procedures for making that reimbursement.\nThis bill would provide that no reimbursement is required by this act for a specified reason.', + 'text': 'The people of the State of California do enact as follows:\n\n\nSECTION 1.\nSection 10295.35 is added to the Public Contract Code, to read:\n10295.35.\n(a) (1) Notwithstanding any other law, a state agency shall not enter into any contract for the acquisition of goods or services in the amount of one hundred thousand dollars ($100,000) or more with a contractor that, in the provision of benefits, discriminates between employees on the basis of an employee’s or dependent’s actual or perceived gender identity, including, but not limited to, the employee’s or dependent’s identification as transgender.\n(2) For purposes of this section, “contract” includes contracts with a cumulative amount of one hundred thousand dollars ($100,000) or more per contractor in each fiscal year.\n(3) For purposes of this section, an employee health plan is discriminatory if the plan is not consistent with Section 1365.5 of the Health and Safety Code and Section 10140 of the Insurance Code.\n(4) The requirements of this section shall apply only to those portions of a contractor’s operations that occur under any of the following conditions:\n(A) Within the state.\n(B) On real property outside the state if the property is owned by the state or if the state has a right to occupy the property, and if the contractor’s presence at that location is connected to a contract with the state.\n(C) Elsewhere in the United States where work related to a state contract is being performed.\n(b) Contractors shall treat as confidential, to the maximum extent allowed by law or by the requirement of the contractor’s insurance provider, any request by an employee or applicant for employment benefits or any documentation of eligibility for benefits submitted by an employee or applicant for employment.\n(c) After taking all reasonable measures to find a contractor that complies with this section, as determined by the state agency, the requirements of this section may be waived under any of the following circumstances:\n(1) There is only one prospective contractor willing to enter into a specific contract with the state agency.\n(2) The contract is necessary to respond to an emergency, as determined by the state agency, that endangers the public health, welfare, or safety, or the contract is necessary for the provision of essential services, and no entity that complies with the requirements of this section capable of responding to the emergency is immediately available.\n(3) The requirements of this section violate, or are inconsistent with, the terms or conditions of a grant, subvention, or agreement, if the agency has made a good faith attempt to change the terms or conditions of any grant, subvention, or agreement to authorize application of this section.\n(4) The contractor is providing wholesale or bulk water, power, or natural gas, the conveyance or transmission of the same, or ancillary services, as required for ensuring reliable services in accordance with good utility practice, if the purchase of the same cannot practically be accomplished through the standard competitive bidding procedures and the contractor is not providing direct retail services to end users.\n(d) (1) A contractor shall not be deemed to discriminate in the provision of benefits if the contractor, in providing the benefits, pays the actual costs incurred in obtaining the benefit.\n(2) If a contractor is unable to provide a certain benefit, despite taking reasonable measures to do so, the contractor shall not be deemed to discriminate in the provision of benefits.\n(e) (1) Every contract subject to this chapter shall contain a statement by which the contractor certifies that the contractor is in compliance with this section.\n(2) The department or other contracting agency shall enforce this section pursuant to its existing enforcement powers.\n(3) (A) If a contractor falsely certifies that it is in compliance with this section, the contract with that contractor shall be subject to Article 9 (commencing with Section 10420), unless, within a time period specified by the department or other contracting agency, the contractor provides to the department or agency proof that it has complied, or is in the process of complying, with this section.\n(B) The application of the remedies or penalties contained in Article 9 (commencing with Section 10420) to a contract subject to this chapter shall not preclude the application of any existing remedies otherwise available to the department or other contracting agency under its existing enforcement powers.\n(f) Nothing in this section is intended to regulate the contracting practices of any local jurisdiction.\n(g) This section shall be construed so as not to conflict with applicable federal laws, rules, or regulations. In the event that a court or agency of competent jurisdiction holds that federal law, rule, or regulation invalidates any clause, sentence, paragraph, or section of this code or the application thereof to any person or circumstances, it is the intent of the state that the court or agency sever that clause, sentence, paragraph, or section so that the remainder of this section shall remain in effect.\nSEC. 2.\nSection 10295.35 of the Public Contract Code shall not be construed to create any new enforcement authority or responsibility in the Department of General Services or any other contracting agency.\nSEC. 3.\nNo reimbursement is required by this act pursuant to Section 6 of Article XIII\u2009B of the California Constitution because the only costs that may be incurred by a local agency or school district will be incurred because this act creates a new crime or infraction, eliminates a crime or infraction, or changes the penalty for a crime or infraction, within the meaning of Section 17556 of the Government Code, or changes the definition of a crime within the meaning of Section 6 of Article XIII\u2009B of the California Constitution.', + 'title': 'An act to add Section 10295.35 to the Public Contract Code, relating to public contracts.'} +``` + +El campo `text` es el input y el campo `summary` es el objetivo. + +## Preprocesa + +Carga el tokenizador T5 para procesar `text` y `summary`: + +```py +>>> from transformers import AutoTokenizer + +>>> tokenizer = AutoTokenizer.from_pretrained("t5-small") +``` + +La función de preprocesamiento necesita: + +1. Agregar un prefijo al input; una clave para que T5 sepa que se trata de una tarea de generación de resúmenes. Algunos modelos capaces de realizar múltiples tareas de NLP requieren una clave que indique la tarea específica. +2. Usar el argumento `text_target` para tokenizar etiquetas. +3. Truncar secuencias para que no sean más largas que la longitud máxima fijada por el parámetro `max_length`. + +```py +>>> prefix = "summarize: " + + +>>> def preprocess_function(examples): +... inputs = [prefix + doc for doc in examples["text"]] +... model_inputs = tokenizer(inputs, max_length=1024, truncation=True) + +... labels = tokenizer(text_target=examples["summary"], max_length=128, truncation=True) + +... model_inputs["labels"] = labels["input_ids"] +... return model_inputs +``` + +Usa la función [`~datasets.Dataset.map`] de 🤗 Datasets para aplicar la función de preprocesamiento sobre el dataset en su totalidad. Puedes acelerar la función `map` configurando el argumento `batched=True` para procesar múltiples elementos del dataset a la vez: + +```py +>>> tokenized_billsum = billsum.map(preprocess_function, batched=True) +``` + +Usa [`DataCollatorForSeq2Seq`] para crear un lote de ejemplos. Esto también *rellenará dinámicamente* tu texto y etiquetas a la dimensión del elemento más largo del lote para que tengan un largo uniforme. Si bien es posible rellenar tu texto en la función `tokenizer` mediante el argumento `padding=True`, el rellenado dinámico es más eficiente. + + + +```py +>>> from transformers import DataCollatorForSeq2Seq + +>>> data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model) +``` + + +```py +>>> from transformers import DataCollatorForSeq2Seq + +>>> data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model, return_tensors="tf") +``` + + + +## Entrenamiento + + + +Carga T5 con [`AutoModelForSeq2SeqLM`]: + +```py +>>> from transformers import AutoModelForSeq2SeqLM, Seq2SeqTrainingArguments, Seq2SeqTrainer + +>>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-small") +``` + + + +Para familiarizarte con el proceso para realizar fine-tuning sobre un modelo con [`Trainer`], ¡mira el tutorial básico [aquí](../training#finetune-with-trainer)! + + + +En este punto, solo faltan tres pasos: + +1. Definir tus hiperparámetros de entrenamiento en [`Seq2SeqTrainingArguments`]. +2. Pasarle los argumentos de entrenamiento a [`Seq2SeqTrainer`] junto con el modelo, dataset y data collator. +3. Llamar [`~Trainer.train`] para realizar el fine-tuning sobre tu modelo. + +```py +>>> training_args = Seq2SeqTrainingArguments( +... output_dir="./results", +... evaluation_strategy="epoch", +... learning_rate=2e-5, +... per_device_train_batch_size=16, +... per_device_eval_batch_size=16, +... weight_decay=0.01, +... save_total_limit=3, +... num_train_epochs=1, +... fp16=True, +... ) + +>>> trainer = Seq2SeqTrainer( +... model=model, +... args=training_args, +... train_dataset=tokenized_billsum["train"], +... eval_dataset=tokenized_billsum["test"], +... tokenizer=tokenizer, +... data_collator=data_collator, +... ) + +>>> trainer.train() +``` + + +Para hacer fine-tuning de un modelo en TensorFlow, comienza por convertir tus datasets al formato `tf.data.Dataset` con [`~datasets.Dataset.to_tf_dataset`]. Especifica los inputs y etiquetas en `columns`, el tamaño de lote, el data collator, y si es necesario mezclar el dataset: + +```py +>>> tf_train_set = tokenized_billsum["train"].to_tf_dataset( +... columns=["attention_mask", "input_ids", "labels"], +... shuffle=True, +... batch_size=16, +... collate_fn=data_collator, +... ) + +>>> tf_test_set = tokenized_billsum["test"].to_tf_dataset( +... columns=["attention_mask", "input_ids", "labels"], +... shuffle=False, +... batch_size=16, +... collate_fn=data_collator, +... ) +``` + + + +Para familiarizarte con el fine-tuning con Keras, ¡mira el tutorial básico [aquí](training#finetune-with-keras)! + + + +Crea la función optimizadora, establece la tasa de aprendizaje y algunos hiperparámetros de entrenamiento: + +```py +>>> from transformers import create_optimizer, AdamWeightDecay + +>>> optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01) +``` + +Carga T5 con [`TFAutoModelForSeq2SeqLM`]: + +```py +>>> from transformers import TFAutoModelForSeq2SeqLM + +>>> model = TFAutoModelForSeq2SeqLM.from_pretrained("t5-small") +``` + +Configura el modelo para entrenamiento con [`compile`](https://keras.io/api/models/model_training_apis/#compile-method): + +```py +>>> model.compile(optimizer=optimizer) +``` + +Llama a [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) para realizar el fine-tuning del modelo: + +```py +>>> model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3) +``` + + + + + +Para un ejemplo con mayor profundidad de cómo hacer fine-tuning a un modelo para generación de resúmenes, revisa la +[notebook en PyTorch](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/summarization.ipynb) +o a la [notebook en TensorFlow](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/summarization-tf.ipynb). + + \ No newline at end of file From ab62a23d8c4927a26775a01cd0cca7ba77368e04 Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Mon, 8 Aug 2022 23:48:49 +0200 Subject: [PATCH 049/539] Let's not cast them all (#18471) * add correct dtypes when checking for params dtype * forward contrib credits * Update src/transformers/modeling_utils.py Co-authored-by: Thomas Wang <24695242+thomasw21@users.noreply.github.com> * more comments - added more comments on why we cast only floating point parameters * Update src/transformers/modeling_utils.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: sgugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: Thomas Wang <24695242+thomasw21@users.noreply.github.com> --- src/transformers/modeling_utils.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index 2a86128c221bec..8bce35f9e336d9 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -543,8 +543,10 @@ def _load_state_dict_into_meta_model( param_name = param_name[len(start_prefix) :] module_name = param_name - # We convert floating dtypes to the `dtype` passed. - if dtype is not None and not str(param.dtype).startswith("torch.int"): + + # We convert floating dtypes to the `dtype` passed.We want to keep the buffers/params + # in int/uint/bool and not cast them. + if dtype is not None and torch.is_floating_point(param): param = param.to(dtype) if device_map is None: From daf573fefa444102c2816ce7c17ba5abcfc3238e Mon Sep 17 00:00:00 2001 From: "Duong A. Nguyen" <38061659+duongna21@users.noreply.github.com> Date: Tue, 9 Aug 2022 10:20:56 +0700 Subject: [PATCH 050/539] Update examples/flax/language-modeling/run_bart_dlm_flax.py Co-authored-by: Sanchit Gandhi <93869735+sanchit-gandhi@users.noreply.github.com> --- examples/flax/language-modeling/run_bart_dlm_flax.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/flax/language-modeling/run_bart_dlm_flax.py b/examples/flax/language-modeling/run_bart_dlm_flax.py index e6e4b8f0d0cd11..3094374bc5fe3b 100644 --- a/examples/flax/language-modeling/run_bart_dlm_flax.py +++ b/examples/flax/language-modeling/run_bart_dlm_flax.py @@ -810,7 +810,7 @@ def loss_fn(params): # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") - loss = jax.tree_map(lambda x: x / num_labels, loss) + loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss) # true grad = total grad / total samples grad = jax.lax.psum(grad, "batch") From 156a57666e13685512dc76cb06dc703c42b845ff Mon Sep 17 00:00:00 2001 From: duongna21 Date: Tue, 9 Aug 2022 13:34:02 +0700 Subject: [PATCH 051/539] jax.tree_map => jax.tree_util.tree_map --- .../image-captioning/run_image_captioning_flax.py | 10 +++++----- .../flax/language-modeling/run_bart_dlm_flax.py | 12 ++++++------ examples/flax/language-modeling/run_clm_flax.py | 4 ++-- examples/flax/language-modeling/run_mlm_flax.py | 14 +++++++------- examples/flax/language-modeling/run_t5_mlm_flax.py | 6 +++--- .../flax/summarization/run_summarization_flax.py | 12 ++++++------ 6 files changed, 29 insertions(+), 29 deletions(-) diff --git a/examples/flax/image-captioning/run_image_captioning_flax.py b/examples/flax/image-captioning/run_image_captioning_flax.py index 6917a5ebb66001..caa336f4536c3c 100644 --- a/examples/flax/image-captioning/run_image_captioning_flax.py +++ b/examples/flax/image-captioning/run_image_captioning_flax.py @@ -942,11 +942,11 @@ def compute_loss(params): # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") - loss = jax.tree_map(lambda x: x / num_labels, loss) + loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss) # true grad = total grad / total samples grad = jax.lax.psum(grad, "batch") - grad = jax.tree_map(lambda x: x / num_labels, grad) + grad = jax.tree_util.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} @@ -962,7 +962,7 @@ def eval_step(params, batch, label_smoothing_factor=0.0): # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") - loss = jax.tree_map(lambda x: x / num_labels, loss) + loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss) metrics = {"loss": loss} return metrics @@ -1017,7 +1017,7 @@ def save_ckpt(ckpt_dir: str, commit_msg: str = ""): # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: - params = jax.device_get(jax.tree_map(lambda x: x[0], state.params)) + params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params)) model.save_pretrained(os.path.join(training_args.output_dir, ckpt_dir), params=params) tokenizer.save_pretrained(os.path.join(training_args.output_dir, ckpt_dir)) if training_args.push_to_hub: @@ -1069,7 +1069,7 @@ def evaluation_loop( if metrics: # normalize metrics metrics = get_metrics(metrics) - metrics = jax.tree_map(jnp.mean, metrics) + metrics = jax.tree_util.tree_map(jnp.mean, metrics) # compute ROUGE metrics generations = [] diff --git a/examples/flax/language-modeling/run_bart_dlm_flax.py b/examples/flax/language-modeling/run_bart_dlm_flax.py index 3094374bc5fe3b..537a7c5e3f9864 100644 --- a/examples/flax/language-modeling/run_bart_dlm_flax.py +++ b/examples/flax/language-modeling/run_bart_dlm_flax.py @@ -814,7 +814,7 @@ def loss_fn(params): # true grad = total grad / total samples grad = jax.lax.psum(grad, "batch") - grad = jax.tree_map(lambda x: x / num_labels, grad) + grad = jax.tree_util.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} @@ -909,9 +909,9 @@ def eval_step(params, batch): # normalize eval metrics eval_metrics = get_metrics(eval_metrics) - eval_metrics = jax.tree_map(jnp.sum, eval_metrics) + eval_metrics = jax.tree_util.tree_map(jnp.sum, eval_metrics) eval_normalizer = eval_metrics.pop("normalizer") - eval_metrics = jax.tree_map(lambda x: x / eval_normalizer, eval_metrics) + eval_metrics = jax.tree_util.tree_map(lambda x: x / eval_normalizer, eval_metrics) # Update progress bar epochs.desc = f"Step... ({cur_step} | Loss: {eval_metrics['loss']}, Acc: {eval_metrics['accuracy']})" @@ -923,7 +923,7 @@ def eval_step(params, batch): if cur_step % training_args.save_steps == 0 and cur_step > 0: # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: - params = jax.device_get(jax.tree_map(lambda x: x[0], state.params)) + params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params)) model.save_pretrained(training_args.output_dir, params=params) tokenizer.save_pretrained(training_args.output_dir) if training_args.push_to_hub: @@ -949,9 +949,9 @@ def eval_step(params, batch): # normalize eval metrics eval_metrics = get_metrics(eval_metrics) - eval_metrics = jax.tree_map(lambda metric: jnp.sum(metric).item(), eval_metrics) + eval_metrics = jax.tree_util.tree_map(lambda metric: jnp.sum(metric).item(), eval_metrics) eval_normalizer = eval_metrics.pop("normalizer") - eval_metrics = jax.tree_map(lambda x: x / eval_normalizer, eval_metrics) + eval_metrics = jax.tree_util.tree_map(lambda x: x / eval_normalizer, eval_metrics) try: perplexity = math.exp(eval_metrics["loss"]) diff --git a/examples/flax/language-modeling/run_clm_flax.py b/examples/flax/language-modeling/run_clm_flax.py index 5fe786da7cc5ad..b6d22e1af94130 100755 --- a/examples/flax/language-modeling/run_clm_flax.py +++ b/examples/flax/language-modeling/run_clm_flax.py @@ -781,7 +781,7 @@ def eval_step(params, batch): # normalize eval metrics eval_metrics = get_metrics(eval_metrics) - eval_metrics = jax.tree_map(jnp.mean, eval_metrics) + eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics) try: eval_metrics["perplexity"] = math.exp(eval_metrics["loss"]) @@ -824,7 +824,7 @@ def eval_step(params, batch): # normalize eval metrics eval_metrics = get_metrics(eval_metrics) - eval_metrics = jax.tree_map(lambda x: jnp.mean(x).item(), eval_metrics) + eval_metrics = jax.tree_util.tree_map(lambda x: jnp.mean(x).item(), eval_metrics) try: eval_metrics["perplexity"] = math.exp(eval_metrics["loss"]) diff --git a/examples/flax/language-modeling/run_mlm_flax.py b/examples/flax/language-modeling/run_mlm_flax.py index caf3f5f8190ee6..3fc91b2130f2ba 100755 --- a/examples/flax/language-modeling/run_mlm_flax.py +++ b/examples/flax/language-modeling/run_mlm_flax.py @@ -725,11 +725,11 @@ def loss_fn(params): # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") - loss = jax.tree_map(lambda x: x / num_labels, loss) + loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss) # true grad = total grad / total samples grad = jax.lax.psum(grad, "batch") - grad = jax.tree_map(lambda x: x / num_labels, grad) + grad = jax.tree_util.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} @@ -825,9 +825,9 @@ def eval_step(params, batch): # normalize eval metrics eval_metrics = get_metrics(eval_metrics) - eval_metrics = jax.tree_map(jnp.sum, eval_metrics) + eval_metrics = jax.tree_util.tree_map(jnp.sum, eval_metrics) eval_normalizer = eval_metrics.pop("normalizer") - eval_metrics = jax.tree_map(lambda x: x / eval_normalizer, eval_metrics) + eval_metrics = jax.tree_util.tree_map(lambda x: x / eval_normalizer, eval_metrics) # Update progress bar epochs.desc = f"Step... ({cur_step} | Loss: {eval_metrics['loss']}, Acc: {eval_metrics['accuracy']})" @@ -839,7 +839,7 @@ def eval_step(params, batch): if cur_step % training_args.save_steps == 0 and cur_step > 0: # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: - params = jax.device_get(jax.tree_map(lambda x: x[0], state.params)) + params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params)) model.save_pretrained(training_args.output_dir, params=params) tokenizer.save_pretrained(training_args.output_dir) if training_args.push_to_hub: @@ -865,9 +865,9 @@ def eval_step(params, batch): # normalize eval metrics eval_metrics = get_metrics(eval_metrics) - eval_metrics = jax.tree_map(lambda metric: jnp.sum(metric).item(), eval_metrics) + eval_metrics = jax.tree_util.tree_map(lambda metric: jnp.sum(metric).item(), eval_metrics) eval_normalizer = eval_metrics.pop("normalizer") - eval_metrics = jax.tree_map(lambda x: x / eval_normalizer, eval_metrics) + eval_metrics = jax.tree_util.tree_map(lambda x: x / eval_normalizer, eval_metrics) try: perplexity = math.exp(eval_metrics["loss"]) diff --git a/examples/flax/language-modeling/run_t5_mlm_flax.py b/examples/flax/language-modeling/run_t5_mlm_flax.py index e174f0b38d0ed5..1028704092835e 100755 --- a/examples/flax/language-modeling/run_t5_mlm_flax.py +++ b/examples/flax/language-modeling/run_t5_mlm_flax.py @@ -938,7 +938,7 @@ def eval_step(params, batch): # get eval metrics eval_metrics = get_metrics(eval_metrics) - eval_metrics = jax.tree_map(jnp.mean, eval_metrics) + eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics) # Update progress bar epochs.write(f"Step... ({cur_step} | Loss: {eval_metrics['loss']}, Acc: {eval_metrics['accuracy']})") @@ -950,7 +950,7 @@ def eval_step(params, batch): if cur_step % training_args.save_steps == 0 and cur_step > 0: # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: - params = jax.device_get(jax.tree_map(lambda x: x[0], state.params)) + params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params)) model.save_pretrained(training_args.output_dir, params=params) tokenizer.save_pretrained(training_args.output_dir) if training_args.push_to_hub: @@ -976,7 +976,7 @@ def eval_step(params, batch): # get eval metrics eval_metrics = get_metrics(eval_metrics) - eval_metrics = jax.tree_map(lambda metric: jnp.mean(metric).item(), eval_metrics) + eval_metrics = jax.tree_util.tree_map(lambda metric: jnp.mean(metric).item(), eval_metrics) if jax.process_index() == 0: eval_metrics = {f"eval_{metric_name}": value for metric_name, value in eval_metrics.items()} diff --git a/examples/flax/summarization/run_summarization_flax.py b/examples/flax/summarization/run_summarization_flax.py index 2c82ab7bb2136f..34e833d1e6953d 100644 --- a/examples/flax/summarization/run_summarization_flax.py +++ b/examples/flax/summarization/run_summarization_flax.py @@ -795,11 +795,11 @@ def compute_loss(params): # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") - loss = jax.tree_map(lambda x: x / num_labels, loss) + loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss) # true grad = total grad / total samples grad = jax.lax.psum(grad, "batch") - grad = jax.tree_map(lambda x: x / num_labels, grad) + grad = jax.tree_util.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} @@ -815,7 +815,7 @@ def eval_step(params, batch, label_smoothing_factor=0.0): # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") - loss = jax.tree_map(lambda x: x / num_labels, loss) + loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss) metrics = {"loss": loss} return metrics @@ -903,7 +903,7 @@ def generate_step(params, batch): # normalize eval metrics eval_metrics = get_metrics(eval_metrics) - eval_metrics = jax.tree_map(jnp.mean, eval_metrics) + eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics) # compute ROUGE metrics rouge_desc = "" @@ -924,7 +924,7 @@ def generate_step(params, batch): # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: - params = jax.device_get(jax.tree_map(lambda x: x[0], state.params)) + params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params)) model.save_pretrained(training_args.output_dir, params=params) tokenizer.save_pretrained(training_args.output_dir) if training_args.push_to_hub: @@ -958,7 +958,7 @@ def generate_step(params, batch): # normalize prediction metrics pred_metrics = get_metrics(pred_metrics) - pred_metrics = jax.tree_map(jnp.mean, pred_metrics) + pred_metrics = jax.tree_util.tree_map(jnp.mean, pred_metrics) # compute ROUGE metrics rouge_desc = "" From fe785730dcbf3390aa07f667e8d3c4b02d6638e0 Mon Sep 17 00:00:00 2001 From: Niklas Hansson Date: Tue, 9 Aug 2022 09:35:05 +0200 Subject: [PATCH 052/539] fix: data2vec-vision Onnx ready-made configuration. (#18427) * feat: add the data2vec conf that are missing https://huggingface.co/docs/transformers/serialization * fix: wrong config --- src/transformers/onnx/features.py | 6 ++++++ tests/onnx/test_onnx_v2.py | 1 + 2 files changed, 7 insertions(+) diff --git a/src/transformers/onnx/features.py b/src/transformers/onnx/features.py index e7c24a8ad97a81..3eea94c8c1a64e 100644 --- a/src/transformers/onnx/features.py +++ b/src/transformers/onnx/features.py @@ -229,6 +229,12 @@ class FeaturesManager: "question-answering", onnx_config_cls="models.data2vec.Data2VecTextOnnxConfig", ), + "data2vec-vision": supported_features_mapping( + "default", + "image-classification", + "image-segmentation", + onnx_config_cls="models.data2vec.Data2VecVisionOnnxConfig", + ), "deberta": supported_features_mapping( "default", "masked-lm", diff --git a/tests/onnx/test_onnx_v2.py b/tests/onnx/test_onnx_v2.py index cfc58dd335c30d..c22406841afdae 100644 --- a/tests/onnx/test_onnx_v2.py +++ b/tests/onnx/test_onnx_v2.py @@ -207,6 +207,7 @@ def test_values_override(self): ("deit", "facebook/deit-small-patch16-224"), ("beit", "microsoft/beit-base-patch16-224"), ("data2vec-text", "facebook/data2vec-text-base"), + ("data2vec-vision", "facebook/data2vec-vision-base"), ("perceiver", "deepmind/language-perceiver", ("masked-lm", "sequence-classification")), ("perceiver", "deepmind/vision-perceiver-conv", ("image-classification",)), ("yolos", "hustvl/yolos-tiny"), From 8cb5ecd912e09301be126c6ce6e9a22ca7153da4 Mon Sep 17 00:00:00 2001 From: Thomas Chaigneau Date: Tue, 9 Aug 2022 09:46:53 +0200 Subject: [PATCH 053/539] Add mt5 onnx config (#18394) * update features * MT5OnnxConfig added with updated with tests and docs * fix imports * fix onnc_config_cls for mt5 Co-authored-by: Thomas Chaigneau --- docs/source/en/serialization.mdx | 1 + src/transformers/models/mt5/__init__.py | 4 +-- .../models/mt5/configuration_mt5.py | 28 +++++++++++++++++++ src/transformers/onnx/features.py | 7 +++++ tests/onnx/test_onnx_v2.py | 1 + 5 files changed, 39 insertions(+), 2 deletions(-) diff --git a/docs/source/en/serialization.mdx b/docs/source/en/serialization.mdx index e41ccae949e8bb..9561bbd8ec77c1 100644 --- a/docs/source/en/serialization.mdx +++ b/docs/source/en/serialization.mdx @@ -79,6 +79,7 @@ Ready-made configurations include the following architectures: - mBART - MobileBERT - MobileViT +- MT5 - OpenAI GPT-2 - Perceiver - PLBart diff --git a/src/transformers/models/mt5/__init__.py b/src/transformers/models/mt5/__init__.py index 3f04a256918bc3..f6e717bd875b52 100644 --- a/src/transformers/models/mt5/__init__.py +++ b/src/transformers/models/mt5/__init__.py @@ -43,7 +43,7 @@ MT5TokenizerFast = T5TokenizerFast -_import_structure = {"configuration_mt5": ["MT5Config"]} +_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]} try: if not is_torch_available(): @@ -71,7 +71,7 @@ if TYPE_CHECKING: - from .configuration_mt5 import MT5Config + from .configuration_mt5 import MT5Config, MT5OnnxConfig try: if not is_torch_available(): diff --git a/src/transformers/models/mt5/configuration_mt5.py b/src/transformers/models/mt5/configuration_mt5.py index ad0345f53189e9..3e72831ad25fbc 100644 --- a/src/transformers/models/mt5/configuration_mt5.py +++ b/src/transformers/models/mt5/configuration_mt5.py @@ -13,8 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. """ mT5 model configuration""" +from typing import Mapping from ...configuration_utils import PretrainedConfig +from ...onnx import OnnxSeq2SeqConfigWithPast from ...utils import logging @@ -143,3 +145,29 @@ def num_attention_heads(self): @property def num_hidden_layers(self): return self.num_layers + + +# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig +class MT5OnnxConfig(OnnxSeq2SeqConfigWithPast): + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + common_inputs = { + "input_ids": {0: "batch", 1: "encoder_sequence"}, + "attention_mask": {0: "batch", 1: "encoder_sequence"}, + } + if self.use_past: + common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence" + common_inputs["decoder_input_ids"] = {0: "batch"} + common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"} + else: + common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"} + common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"} + + if self.use_past: + self.fill_with_past_key_values_(common_inputs, direction="inputs") + + return common_inputs + + @property + def default_onnx_opset(self) -> int: + return 13 diff --git a/src/transformers/onnx/features.py b/src/transformers/onnx/features.py index 3eea94c8c1a64e..8d8b8190e46819 100644 --- a/src/transformers/onnx/features.py +++ b/src/transformers/onnx/features.py @@ -383,6 +383,13 @@ class FeaturesManager: "image-classification", onnx_config_cls="models.mobilevit.MobileViTOnnxConfig", ), + "mt5": supported_features_mapping( + "default", + "default-with-past", + "seq2seq-lm", + "seq2seq-lm-with-past", + onnx_config_cls="models.mt5.MT5OnnxConfig", + ), "m2m-100": supported_features_mapping( "default", "default-with-past", diff --git a/tests/onnx/test_onnx_v2.py b/tests/onnx/test_onnx_v2.py index c22406841afdae..98ab0fad131e01 100644 --- a/tests/onnx/test_onnx_v2.py +++ b/tests/onnx/test_onnx_v2.py @@ -224,6 +224,7 @@ def test_values_override(self): ("mbart", "sshleifer/tiny-mbart"), ("t5", "t5-small"), ("marian", "Helsinki-NLP/opus-mt-en-de"), + ("mt5", "google/mt5-base"), ("m2m-100", "facebook/m2m100_418M"), ("blenderbot-small", "facebook/blenderbot_small-90M"), ("blenderbot", "facebook/blenderbot-400M-distill"), From 6bea7b8178da87e2b94bfc61260e93b4c6c61431 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Tue, 9 Aug 2022 14:33:41 +0200 Subject: [PATCH 054/539] Minor update of `run_call_with_unpacked_inputs` (#18541) Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: ydshieh --- src/transformers/modeling_tf_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/modeling_tf_utils.py b/src/transformers/modeling_tf_utils.py index 703440d80ad71b..68ee4117a2f9db 100644 --- a/src/transformers/modeling_tf_utils.py +++ b/src/transformers/modeling_tf_utils.py @@ -398,7 +398,7 @@ def run_call_with_unpacked_inputs(self, *args, **kwargs): fn_args_and_kwargs.update(dict(zip(func.__code__.co_varnames[1:], args))) # Encoder Decoder models delegate the application of the configuration options to their inner models. - if "encoder_decoder" in str(self).lower(): + if "EncoderDecoder" in self.__class__.__name__: config = None else: config = self.config From ab2006e3d6db88654526a4169e65d4bfc52da2e3 Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Tue, 9 Aug 2022 14:47:18 +0200 Subject: [PATCH 055/539] BART - Fix attention mask device issue on copied models (#18540) * attempt to fix attn mask device * fix bart `_prepare_decoder_attention_mask` - add correct device - run `make fix-copies` to propagate the fix --- src/transformers/models/bart/modeling_bart.py | 4 +++- .../models/bigbird_pegasus/modeling_bigbird_pegasus.py | 4 +++- src/transformers/models/blenderbot/modeling_blenderbot.py | 4 +++- .../models/blenderbot_small/modeling_blenderbot_small.py | 4 +++- src/transformers/models/marian/modeling_marian.py | 4 +++- src/transformers/models/mbart/modeling_mbart.py | 4 +++- src/transformers/models/opt/modeling_opt.py | 4 +++- src/transformers/models/pegasus/modeling_pegasus.py | 4 +++- src/transformers/models/plbart/modeling_plbart.py | 4 +++- 9 files changed, 27 insertions(+), 9 deletions(-) diff --git a/src/transformers/models/bart/modeling_bart.py b/src/transformers/models/bart/modeling_bart.py index 011eee1f24b54b..8411cc6cefefed 100755 --- a/src/transformers/models/bart/modeling_bart.py +++ b/src/transformers/models/bart/modeling_bart.py @@ -915,7 +915,9 @@ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_em if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) + expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( + inputs_embeds.device + ) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) diff --git a/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py b/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py index ce5040e92c7fa3..3cdfe7d2ffe097 100755 --- a/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py +++ b/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py @@ -2116,7 +2116,9 @@ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_em if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) + expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( + inputs_embeds.device + ) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) diff --git a/src/transformers/models/blenderbot/modeling_blenderbot.py b/src/transformers/models/blenderbot/modeling_blenderbot.py index 2a53099d9c4ce5..303a5c4f256997 100755 --- a/src/transformers/models/blenderbot/modeling_blenderbot.py +++ b/src/transformers/models/blenderbot/modeling_blenderbot.py @@ -854,7 +854,9 @@ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_em if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) + expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( + inputs_embeds.device + ) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) diff --git a/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py b/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py index e5b717ef9c181d..8dac9b6a7573c2 100755 --- a/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py +++ b/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py @@ -850,7 +850,9 @@ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_em if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) + expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( + inputs_embeds.device + ) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) diff --git a/src/transformers/models/marian/modeling_marian.py b/src/transformers/models/marian/modeling_marian.py index 32e59098ef11dc..26dc6b12dc9fe6 100755 --- a/src/transformers/models/marian/modeling_marian.py +++ b/src/transformers/models/marian/modeling_marian.py @@ -860,7 +860,9 @@ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_em if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) + expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( + inputs_embeds.device + ) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) diff --git a/src/transformers/models/mbart/modeling_mbart.py b/src/transformers/models/mbart/modeling_mbart.py index b9057178a03227..16ea95bc0aedde 100755 --- a/src/transformers/models/mbart/modeling_mbart.py +++ b/src/transformers/models/mbart/modeling_mbart.py @@ -913,7 +913,9 @@ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_em if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) + expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( + inputs_embeds.device + ) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) diff --git a/src/transformers/models/opt/modeling_opt.py b/src/transformers/models/opt/modeling_opt.py index 8a1c021c845052..419c2391e4c708 100644 --- a/src/transformers/models/opt/modeling_opt.py +++ b/src/transformers/models/opt/modeling_opt.py @@ -534,7 +534,9 @@ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_em if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) + expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( + inputs_embeds.device + ) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) diff --git a/src/transformers/models/pegasus/modeling_pegasus.py b/src/transformers/models/pegasus/modeling_pegasus.py index 9e797af035cf0a..5a144aa3e9c514 100755 --- a/src/transformers/models/pegasus/modeling_pegasus.py +++ b/src/transformers/models/pegasus/modeling_pegasus.py @@ -880,7 +880,9 @@ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_em if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) + expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( + inputs_embeds.device + ) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) diff --git a/src/transformers/models/plbart/modeling_plbart.py b/src/transformers/models/plbart/modeling_plbart.py index eb8b5d2b41671c..d03ddf33ebfa7a 100755 --- a/src/transformers/models/plbart/modeling_plbart.py +++ b/src/transformers/models/plbart/modeling_plbart.py @@ -887,7 +887,9 @@ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_em if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) + expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( + inputs_embeds.device + ) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) From 9f5fe635483c9c80d59c9523eef5cf19ed93174c Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Tue, 9 Aug 2022 18:50:02 +0200 Subject: [PATCH 056/539] Adding a new `align_to_words` param to qa pipeline. (#18010) * Adding a new `align_to_words` param to qa pipeline. * Update src/transformers/pipelines/question_answering.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Import protection. Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> --- .../pipelines/question_answering.py | 48 +++++++++++++++---- .../test_pipelines_question_answering.py | 23 +++++++++ 2 files changed, 61 insertions(+), 10 deletions(-) diff --git a/src/transformers/pipelines/question_answering.py b/src/transformers/pipelines/question_answering.py index d58762035ef7f8..6f07382dc57c6b 100644 --- a/src/transformers/pipelines/question_answering.py +++ b/src/transformers/pipelines/question_answering.py @@ -8,7 +8,14 @@ from ..data import SquadExample, SquadFeatures, squad_convert_examples_to_features from ..modelcard import ModelCard from ..tokenization_utils import PreTrainedTokenizer -from ..utils import PaddingStrategy, add_end_docstrings, is_tf_available, is_torch_available, logging +from ..utils import ( + PaddingStrategy, + add_end_docstrings, + is_tf_available, + is_tokenizers_available, + is_torch_available, + logging, +) from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline @@ -18,6 +25,9 @@ from ..modeling_tf_utils import TFPreTrainedModel from ..modeling_utils import PreTrainedModel + if is_tokenizers_available(): + import tokenizers + if is_tf_available(): import tensorflow as tf @@ -180,6 +190,7 @@ def _sanitize_parameters( max_seq_len=None, max_question_len=None, handle_impossible_answer=None, + align_to_words=None, **kwargs ): # Set defaults values @@ -208,6 +219,8 @@ def _sanitize_parameters( postprocess_params["max_answer_len"] = max_answer_len if handle_impossible_answer is not None: postprocess_params["handle_impossible_answer"] = handle_impossible_answer + if align_to_words is not None: + postprocess_params["align_to_words"] = align_to_words return preprocess_params, {}, postprocess_params def __call__(self, *args, **kwargs): @@ -243,6 +256,9 @@ def __call__(self, *args, **kwargs): The maximum length of the question after tokenization. It will be truncated if needed. handle_impossible_answer (`bool`, *optional*, defaults to `False`): Whether or not we accept impossible as an answer. + align_to_words (`bool`, *optional*, defaults to `True`): + Attempts to align the answer to real words. Improves quality on space separated langages. Might hurt on + non-space-separated languages (like Japanese or Chinese) Return: A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys: @@ -386,6 +402,7 @@ def postprocess( top_k=1, handle_impossible_answer=False, max_answer_len=15, + align_to_words=True, ): min_null_score = 1000000 # large and positive answers = [] @@ -464,15 +481,8 @@ def postprocess( for s, e, score in zip(starts, ends, scores): s = s - offset e = e - offset - try: - start_word = enc.token_to_word(s) - end_word = enc.token_to_word(e) - start_index = enc.word_to_chars(start_word, sequence_index=sequence_index)[0] - end_index = enc.word_to_chars(end_word, sequence_index=sequence_index)[1] - except Exception: - # Some tokenizers don't really handle words. Keep to offsets then. - start_index = enc.offsets[s][0] - end_index = enc.offsets[e][1] + + start_index, end_index = self.get_indices(enc, s, e, sequence_index, align_to_words) answers.append( { @@ -490,6 +500,24 @@ def postprocess( return answers[0] return answers + def get_indices( + self, enc: "tokenizers.Encoding", s: int, e: int, sequence_index: int, align_to_words: bool + ) -> Tuple[int, int]: + if align_to_words: + try: + start_word = enc.token_to_word(s) + end_word = enc.token_to_word(e) + start_index = enc.word_to_chars(start_word, sequence_index=sequence_index)[0] + end_index = enc.word_to_chars(end_word, sequence_index=sequence_index)[1] + except Exception: + # Some tokenizers don't really handle words. Keep to offsets then. + start_index = enc.offsets[s][0] + end_index = enc.offsets[e][1] + else: + start_index = enc.offsets[s][0] + end_index = enc.offsets[e][1] + return start_index, end_index + def decode( self, start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int, undesired_tokens: np.ndarray ) -> Tuple: diff --git a/tests/pipelines/test_pipelines_question_answering.py b/tests/pipelines/test_pipelines_question_answering.py index c3a0da2f2b5e9a..001254aa94b01e 100644 --- a/tests/pipelines/test_pipelines_question_answering.py +++ b/tests/pipelines/test_pipelines_question_answering.py @@ -171,6 +171,29 @@ def ensure_large_logits_postprocess( self.assertEqual(nested_simplify(outputs), {"score": 0.028, "start": 0, "end": 11, "answer": "HuggingFace"}) + @slow + @require_torch + def test_small_model_japanese(self): + question_answerer = pipeline( + "question-answering", + model="KoichiYasuoka/deberta-base-japanese-aozora-ud-head", + ) + output = question_answerer(question="国語", context="全学年にわたって小学校の国語の教科書に挿し絵が用いられている") + + # Wrong answer, the whole text is identified as one "word" since the tokenizer does not include + # a pretokenizer + self.assertEqual( + nested_simplify(output), + {"score": 1.0, "start": 0, "end": 30, "answer": "全学年にわたって小学校の国語の教科書に挿し絵が用いられている"}, + ) + + # Disable word alignment + output = question_answerer(question="国語", context="全学年にわたって小学校の国語の教科書に挿し絵が用いられている", align_to_words=False) + self.assertEqual( + nested_simplify(output), + {"score": 1.0, "start": 15, "end": 18, "answer": "教科書"}, + ) + @slow @require_torch def test_small_model_long_context_cls_slow(self): From 0c183cc2f4d6121705d7c39011b34c191e194d16 Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Tue, 9 Aug 2022 09:58:11 -0700 Subject: [PATCH 057/539] =?UTF-8?q?=20=F0=9F=93=9D=20update=20metric=20wit?= =?UTF-8?q?h=20evaluate=20(#18535)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/source/en/training.mdx | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/docs/source/en/training.mdx b/docs/source/en/training.mdx index 6b0ec8a4081050..9222d27ac81f6e 100644 --- a/docs/source/en/training.mdx +++ b/docs/source/en/training.mdx @@ -98,18 +98,18 @@ Specify where to save the checkpoints from your training: >>> training_args = TrainingArguments(output_dir="test_trainer") ``` -### Metrics +### Evaluate -[`Trainer`] does not automatically evaluate model performance during training. You will need to pass [`Trainer`] a function to compute and report metrics. The 🤗 Datasets library provides a simple [`accuracy`](https://huggingface.co/metrics/accuracy) function you can load with the `load_metric` (see this [tutorial](https://huggingface.co/docs/datasets/metrics.html) for more information) function: +[`Trainer`] does not automatically evaluate model performance during training. You'll need to pass [`Trainer`] a function to compute and report metrics. The [🤗 Evaluate](https://huggingface.co/docs/evaluate/index) library provides a simple [`accuracy`](https://huggingface.co/spaces/evaluate-metric/accuracy) function you can load with the [`evaluate.load`] (see this [quicktour](https://huggingface.co/docs/evaluate/a_quick_tour) for more information) function: ```py >>> import numpy as np ->>> from datasets import load_metric +>>> import evaluate ->>> metric = load_metric("accuracy") +>>> metric = evaluate.load("accuracy") ``` -Call `compute` on `metric` to calculate the accuracy of your predictions. Before passing your predictions to `compute`, you need to convert the predictions to logits (remember all 🤗 Transformers models return logits): +Call [`~evaluate.compute`] on `metric` to calculate the accuracy of your predictions. Before passing your predictions to `compute`, you need to convert the predictions to logits (remember all 🤗 Transformers models return logits): ```py >>> def compute_metrics(eval_pred): @@ -341,12 +341,14 @@ To keep track of your training progress, use the [tqdm](https://tqdm.github.io/) ... progress_bar.update(1) ``` -### Metrics +### Evaluate -Just like how you need to add an evaluation function to [`Trainer`], you need to do the same when you write your own training loop. But instead of calculating and reporting the metric at the end of each epoch, this time you will accumulate all the batches with [`add_batch`](https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=add_batch#datasets.Metric.add_batch) and calculate the metric at the very end. +Just like how you added an evaluation function to [`Trainer`], you need to do the same when you write your own training loop. But instead of calculating and reporting the metric at the end of each epoch, this time you'll accumulate all the batches with [`~evaluate.add_batch`] and calculate the metric at the very end. ```py ->>> metric = load_metric("accuracy") +>>> import evaluate + +>>> metric = evaluate.load("accuracy") >>> model.eval() >>> for batch in eval_dataloader: ... batch = {k: v.to(device) for k, v in batch.items()} From 5e2f373705b27318738c87cebf14548f9246e1bd Mon Sep 17 00:00:00 2001 From: YouJiacheng <1503679330@qq.com> Date: Wed, 10 Aug 2022 02:23:30 +0800 Subject: [PATCH 058/539] Restore _init_weights value in no_init_weights (#18504) * Recover _init_weights value in no_init_weights For potential nested use. In addition, users might modify private no_init_weights as well. * Apply suggestions from code review Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Remove private variable change check Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> --- src/transformers/modeling_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index 8bce35f9e336d9..78c012ec095fdb 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -106,12 +106,13 @@ def no_init_weights(_enable=True): TODO(Patrick): Delete safety argument `_enable=True` at next major version. . """ global _init_weights + old_init_weights = _init_weights if _enable: _init_weights = False try: yield finally: - _init_weights = True + _init_weights = old_init_weights try: From 38a674599c5b953bd6070e1f42ef3fee59fdb3b7 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger Date: Tue, 9 Aug 2022 15:15:01 -0400 Subject: [PATCH 059/539] Clean up comment --- src/transformers/utils/hub.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py index 570ff52e707532..07164e735db901 100644 --- a/src/transformers/utils/hub.py +++ b/src/transformers/utils/hub.py @@ -952,7 +952,6 @@ def move_to_new_cache(file, repo, filename, revision, etag, commit_hash): # blobs os.makedirs(os.path.join(repo, "blobs"), exist_ok=True) - # TODO: replace copy by move when all works well. blob_path = os.path.join(repo, "blobs", etag) shutil.move(file, blob_path) From 8cf4a6f0a63ed3aeed68192a9304fed2bd0ce100 Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Tue, 9 Aug 2022 16:22:55 -0700 Subject: [PATCH 060/539] =?UTF-8?q?=20=F0=9F=93=9D=20update=20documentatio?= =?UTF-8?q?n=20build=20section=20(#18548)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/source/en/pr_checks.mdx | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/docs/source/en/pr_checks.mdx b/docs/source/en/pr_checks.mdx index 57e0766c7f6776..8b562b62b29c57 100644 --- a/docs/source/en/pr_checks.mdx +++ b/docs/source/en/pr_checks.mdx @@ -65,13 +65,9 @@ Just in case anything slipped through the cracks, the full test suite is also ru ## Documentation build -The job `ci/circleci: build_doc` runs a build of the documentation just to make sure everything will be okay once your PR is merged. If that steps fails, you can inspect it locally by going into the `docs` folder of the Transformers repo and then typing +The `build_pr_documentation` job builds and generates a preview of the documentation to make sure everything looks okay once your PR is merged. A bot will add a link to preview the documentation in your PR. Any changes you make to the PR are automatically updated in the preview. If the documentation fails to build, click on **Details** next to the failed job to see where things went wrong. Often, the error is as simple as a missing file in the `toctree`. -```bash -make html -``` - -Sphinx is not known for its helpful error messages, so you might have to try a few things to really find the source of the error. +If you're interested in building or previewing the documentation locally, take a look at the [`README.md`](https://github.com/huggingface/transformers/tree/main/docs) in the docs folder. ## Code and documentation style From 4a51075a96d2049f368b5f3dd6c0e9f08f599b62 Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Wed, 10 Aug 2022 09:13:36 +0200 Subject: [PATCH 061/539] `bitsandbytes` - `Linear8bitLt` integration into `transformers` models (#17901) * first commit * correct replace function * add final changes - works like charm! - cannot implement tests yet - tested * clean up a bit * add bitsandbytes dependencies * working version - added import function - added bitsandbytes utils file * small fix * small fix - fix import issue * fix import issues * Apply suggestions from code review Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * refactor a bit - move bitsandbytes utils to utils - change comments on functions * reformat docstring - reformat docstring on init_empty_weights_8bit * Update src/transformers/__init__.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * revert bad formatting * change to bitsandbytes * refactor a bit - remove init8bit since it is useless * more refactoring - fixed init empty weights issue - added threshold param * small hack to make it work * Update src/transformers/modeling_utils.py * Update src/transformers/modeling_utils.py * revmoe the small hack * modify utils file * make style + refactor a bit * create correctly device map * add correct dtype for device map creation * Apply suggestions from code review Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * apply suggestions - remove with torch.grad - do not rely on Python bool magic! * add docstring - add docstring for new kwargs * add docstring - comment `replace_8bit_linear` function - fix weird formatting * - added more documentation - added new utility function for memory footprint tracking - colab demo to add * few modifs - typo doc - force cast into float16 when load_in_8bit is enabled * added colab link * add test architecture + docstring a bit * refactor a bit testing class * make style + refactor a bit * enhance checks - add more checks - start writing saving test * clean up a bit * male style * add more details on doc * add more tests - still needs to fix 2 tests * replace by "or" - could not fix it from GitHub GUI Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * refactor a bit testing code + add readme * make style * fix import issue * Update src/transformers/modeling_utils.py Co-authored-by: Michael Benayoun * add few comments * add more doctring + make style * more docstring * raise error when loaded in 8bit * make style * add warning if loaded on CPU * add small sanity check * fix small comment * add bitsandbytes on dockerfile * Improve documentation - improve documentation from comments * add few comments * slow tests pass on the VM but not on the CI VM * Fix merge conflict * make style * another test should pass on a multi gpu setup * fix bad import in testing file * Fix slow tests - remove dummy batches - no more CUDA illegal memory errors * odify dockerfile * Update docs/source/en/main_classes/model.mdx * Update Dockerfile * Update model.mdx * Update Dockerfile * Apply suggestions from code review * few modifications - lm head can stay on disk/cpu - change model name so that test pass * change test value - change test value to the correct output - torch bmm changed to baddmm in bloom modeling when merging * modify installation guidelines * Apply suggestions from code review Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Apply suggestions from code review Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Apply suggestions from code review Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * replace `n`by `name` * merge `load_in_8bit` and `low_cpu_mem_usage` * first try - keep the lm head in full precision * better check - check the attribute `base_model_prefix` instead of computing the number of parameters * added more tests * Update src/transformers/utils/bitsandbytes.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Merge branch 'integration-8bit' of https://github.com/younesbelkada/transformers into integration-8bit * improve documentation - fix typos for installation - change title in the documentation Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: Michael Benayoun --- docker/transformers-all-latest-gpu/Dockerfile | 3 + docs/source/en/main_classes/model.mdx | 41 +++- src/transformers/__init__.py | 1 + src/transformers/modeling_utils.py | 102 ++++++++- src/transformers/utils/bitsandbytes.py | 142 ++++++++++++ tests/mixed_int8/README.md | 37 +++ tests/mixed_int8/__init__.py | 0 tests/mixed_int8/test_mixed_int8.py | 215 ++++++++++++++++++ utils/tests_fetcher.py | 1 + 9 files changed, 534 insertions(+), 8 deletions(-) create mode 100644 src/transformers/utils/bitsandbytes.py create mode 100644 tests/mixed_int8/README.md create mode 100644 tests/mixed_int8/__init__.py create mode 100644 tests/mixed_int8/test_mixed_int8.py diff --git a/docker/transformers-all-latest-gpu/Dockerfile b/docker/transformers-all-latest-gpu/Dockerfile index e97a91f4246fb4..b0a55ba8be946b 100644 --- a/docker/transformers-all-latest-gpu/Dockerfile +++ b/docker/transformers-all-latest-gpu/Dockerfile @@ -45,6 +45,9 @@ RUN python3 -m pip install -U "itsdangerous<2.1.0" RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate +# Add bitsandbytes for mixed int8 testing +RUN python3 -m pip install -i https://test.pypi.org/simple/ bitsandbytes==0.31.5 + RUN python3 -m pip install --no-cache-dir decord # When installing in editable mode, `transformers` is not recognized as a package. diff --git a/docs/source/en/main_classes/model.mdx b/docs/source/en/main_classes/model.mdx index c59af2d2214814..10f81e55d74506 100644 --- a/docs/source/en/main_classes/model.mdx +++ b/docs/source/en/main_classes/model.mdx @@ -105,7 +105,7 @@ You can also write your own device map following the same format (a dictionary l device_map = {"shared": 0, "encoder": 0, "decoder": 1, "lm_head": 1} ``` -Another way to minimize the memory impact of your model is to instantiate it at a lower precision dtype (like `torch.float16`). +Another way to minimize the memory impact of your model is to instantiate it at a lower precision dtype (like `torch.float16`) or use direct quantization techniques as described below. ### Model Instantiation dtype @@ -133,6 +133,45 @@ model = AutoModel.from_config(config) Due to Pytorch design, this functionality is only available for floating dtypes. +### `bitsandbytes` integration for Int8 mixed-precision matrix decomposition + +From the paper `GPT3.int8() : 8-bit Matrix Multiplication for Transformers at Scale`, we suport HuggingFace 🤗 integration for all models in the Hub with few lines of code. +For models trained in half-precision (aka, either `float16` or `bfloat16`) or full precision. This method aims to reduce `nn.Linear` size by 2 (if trained in half precision) or by 4 if trained in full precision, without affecting too much quality by operating on the outliers in half-precision. +This technique is useful and works well for billion scale models (>1B parameters) therefore we advice you to use it only for models of that scale. This method has been tested for 2-billion to 176-billion scale models and supports only PyTorch models. + +![HFxbitsandbytes.png](https://s3.amazonaws.com/moonup/production/uploads/1659861207959-62441d1d9fdefb55a0b7d12c.png) + +Int8 mixed-precision matrix decomposition works by separating a matrix multiplication into two streams: (1) and systematic feature outlier stream matrix multiplied in fp16 (0.01%), (2) a regular stream of int8 matrix multiplication (99.9%). With this method, int8 inference with no predictive degradation is possible for very large models (>=176B parameters). +Values are usually normally distributed, that is, most values are in the range [-3.5, 3.5], but there are some exceptional systematic outliers that are very differently distributed for large models. These outliers are often in the interval [-60, -6] or [6, 60]. Int8 quantization works well for values of magnitude ~5, but beyond that, there is a significant performance penalty. A good default threshold is 6, but a lower threshold might be needed for more unstable models (small models, fine-tuning). + +Note also that you would require a GPU to run mixed-8bit models as the kernels has been compiled for GPUs only. Make sure that you have enough GPU RAM to store the quarter (or half if your model is natively in half precision) of the model before using this feature. + +Below are some notes to help you use this module, or follow this demo on Google colab: [![Open In Google Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1qOjXfQIAULfKvZqwCen8-MoWKGdSatZ4?usp=sharing) + +#### Requirements + +- Make sure you run that on a NVIDIA GPU that supports 8-bit tensor cores (Turing or Ampere GPUs - e.g. T4, RTX20s RTX30s, A40-A100). Note that previous generations of NVIDIA GPUs do not support 8-bit tensor cores. +- Install the correct version of `bitsandbytes` by running: +`pip install -i https://test.pypi.org/simple/ bitsandbytes` +- Install `accelerate`: +`pip install accelerate` + +#### Running mixed-int8 models + +After carefully installing the required libraries, the way to load your mixed 8-bit model is as follows: +```py +model_name = "bigscience/bloom-2b5" +model_8bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_8bit=True) +``` +The implementation supports multi-GPU setup thanks to `accelerate` as backend. If you want to control the GPU memory you want to allocate for each GPU, you can use the `max_memory` argument as follows: +(If allocating `1GB` into GPU-0 and `2GB` into GPU-1, you can use `max_memory={0:"1GB", 1:"2GB"}`) +```py +max_memory_mapping = {0: "1GB", 1: "2GB"} +model_name = "bigscience/bloom-3b" +model_8bit = AutoModelForCausalLM.from_pretrained( + model_name, device_map="auto", load_in_8bit=True, max_memory=max_memory_mapping +) +``` ## ModuleUtilsMixin diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 0a97952b18b85e..be2be2727f0146 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -462,6 +462,7 @@ "is_vision_available", "logging", ], + "utils.bitsandbytes": [], } # sentencepiece-backed objects diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index 78c012ec095fdb..1d895baecfedac 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -61,6 +61,7 @@ copy_func, has_file, is_accelerate_available, + is_bitsandbytes_available, is_offline_mode, logging, replace_return_docstrings, @@ -83,6 +84,9 @@ else: get_balanced_memory = None +if is_bitsandbytes_available(): + from .utils.bitsandbytes import get_key_to_not_convert, replace_8bit_linear, set_module_8bit_tensor_to_device + logger = logging.get_logger(__name__) @@ -501,6 +505,7 @@ def _load_state_dict_into_meta_model( state_dict_folder=None, state_dict_index=None, dtype=None, + load_in_8bit=False, ): """ This is somewhat similar to `_load_state_dict_into_model`, but deals with a model that has some or all of its @@ -561,13 +566,14 @@ def _load_state_dict_into_meta_model( # TODO: group all errors and raise at the end. raise ValueError(f"{param_name} doesn't have any device set.") param_device = device_map[module_name] - if param_device == "disk": offload_index = offload_weight(param, param_name, offload_folder, offload_index) elif param_device == "cpu" and state_dict_index is not None: state_dict_index = offload_weight(param, param_name, state_dict_folder, state_dict_index) - else: + elif not load_in_8bit: set_module_tensor_to_device(model, param_name, param_device, value=param) + else: + set_module_8bit_tensor_to_device(model, param_name, param_device, value=param) return error_msgs, offload_index, state_dict_index @@ -1578,6 +1584,24 @@ def save_pretrained( save_directory, repo_id, files_timestamps, commit_message=commit_message, token=token ) + def get_memory_footprint(self, return_buffers=True): + r""" + Get the memory footprint of a model. This will return the memory footprint of the current model in bytes. + Useful to benchmark the memory footprint of the current model and design some tests. Solution inspired from the + PyTorch discussions: https://discuss.pytorch.org/t/gpu-memory-that-model-uses/56822/2 + + Arguments: + return_buffers (`bool`, *optional*, defaults to `True`): + Whether to return the size of the buffer tensors in the computation of the memory footprint. Buffers + are tensors that do not require gradients and not registered as parameters. E.g. mean and std in batch + norm layers. Please see: https://discuss.pytorch.org/t/what-pytorch-means-by-buffers/120266/2 + """ + mem = sum([param.nelement() * param.element_size() for param in self.parameters()]) + if return_buffers: + mem_bufs = sum([buf.nelement() * buf.element_size() for buf in self.buffers()]) + mem = mem + mem_bufs + return mem + @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs): r""" @@ -1707,6 +1731,22 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P If `True`, will temporarily offload the CPU state dict to the hard drive to avoid getting out of CPU RAM if the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True` when there is some disk offload. + load_in_8bit (`bool`, *optional*, defaults to `False`): + If `True`, will convert the loaded model into mixed-8bit quantized model. To use this feature please + install `bitsandbytes` compiled with your CUDA version by running `pip install -i + https://test.pypi.org/simple/ bitsandbytes-cudaXXX` where XXX is your CUDA version (e.g. 11.6 = 116). + Make also sure that you have enough GPU RAM to store half of the model size since the 8bit modules are + not compiled and adapted for CPUs. + int8_threshold (`float`, *optional*, defaults to 6): + Works together with `load_in_8bit`. This corresponds to the outlier threshold for outlier detection as + described in `GPT3.int8() : 8-bit Matrix Multiplication for Transformers at Scale` paper. Any hidden + states value that is above this threshold will be considered an outlier and the operation on those + values will be done in fp16. Values are usually normally distributed, that is, most values are in the + range [-3.5, 3.5], but there are some exceptional systematic outliers that are very differently + distributed for large models. These outliers are often in the interval [-60, -6] or [6, 60]. Int8 + quantization works well for values of magnitude ~5, but beyond that, there is a significant performance + penalty. A good default threshold is 6, but a lower threshold might be needed for more unstable models + (small models, fine-tuning). subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here. @@ -1796,7 +1836,9 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P device_map = kwargs.pop("device_map", None) max_memory = kwargs.pop("max_memory", None) offload_folder = kwargs.pop("offload_folder", None) - offload_state_dict = kwargs.pop("offload_state_dict", None) + offload_state_dict = kwargs.pop("offload_state_dict", False) + load_in_8bit = kwargs.pop("load_in_8bit", False) + int8_threshold = kwargs.pop("int8_threshold", 6.0) subfolder = kwargs.pop("subfolder", "") if trust_remote_code is True: @@ -1804,7 +1846,6 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P "The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is" " ignored." ) - if device_map is not None: if low_cpu_mem_usage is None: low_cpu_mem_usage = True @@ -1824,6 +1865,28 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P "Using `low_cpu_mem_usage=True` or a `device_map` requires Accelerate: `pip install accelerate`" ) + if load_in_8bit: + if not (is_accelerate_available() and is_bitsandbytes_available()): + raise ImportError( + "Using `load_in_8bit=True` requires Accelerate: `pip install accelerate` and the latest version of" + " bitsandbytes `pip install -i https://test.pypi.org/simple/ bitsandbytes` or" + " pip install bitsandbytes` " + ) + if torch_dtype == "auto" or torch_dtype != torch.float16: + # We force the `dtype` to be float16, this is a requirement from `bitsandbytes` + torch_dtype = torch.float16 + logger.info("Loading the model in mixed int8 - forcing the weights to be casted in float16") + if device_map is None: + raise ValueError( + "A device map needs to be passed to run convert models into mixed-int8 format. Please run" + "`.from_pretrained` with `device_map='auto'`" + ) + if from_tf or from_flax: + raise ValueError( + "Converting into mixed 8-bit weights from tf/flax weights is currently not supported, please make" + " sure the weights are in PyTorch format." + ) + from_pt = not (from_tf | from_flax) user_agent = {"file_type": "model", "framework": "pytorch", "from_auto_class": from_auto_class} @@ -2063,12 +2126,19 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model") init_contexts = [deepspeed.zero.Init(config_dict_or_path=deepspeed_config())] + init_contexts - elif low_cpu_mem_usage: + elif load_in_8bit or low_cpu_mem_usage: init_contexts.append(init_empty_weights()) with ContextManagers(init_contexts): model = cls(config, *model_args, **model_kwargs) + if load_in_8bit: + logger.info("Detected 8-bit loading: activating 8-bit loading for this model") + + # We never convert lm_head or any last modules for numerical stability reasons + modules_to_not_convert = get_key_to_not_convert(model) + model = replace_8bit_linear(model, threshold=int8_threshold, modules_to_not_convert=modules_to_not_convert) + if isinstance(device_map, str): if model._no_split_modules is None: raise ValueError(f"{model.__class__.__name__} does not support `device_map='{device_map}'` yet.") @@ -2091,9 +2161,21 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P # Make sure tied weights are tied before creating the device map. model.tie_weights() device_map = infer_auto_device_map( - model, no_split_module_classes=no_split_modules, dtype=torch_dtype, max_memory=max_memory + model, + no_split_module_classes=no_split_modules, + dtype=torch_dtype if not load_in_8bit else torch.int8, + max_memory=max_memory, ) + if load_in_8bit: + # The LM head can stay on disk / CPU + device_map_without_lm_head = { + key: device_map[key] for key in device_map.keys() if key != modules_to_not_convert + } + if "cpu" in device_map_without_lm_head.values() or "disk" in device_map_without_lm_head.values(): + raise ValueError("8-bit operations on `bitsandbytes` are not supported under CPU!") + del device_map_without_lm_head + if from_tf: if resolved_archive_file.endswith(".index"): # Load from a TensorFlow 1.X checkpoint - provided by original authors @@ -2145,6 +2227,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P offload_folder=offload_folder, offload_state_dict=offload_state_dict, dtype=torch_dtype, + load_in_8bit=load_in_8bit, ) # make sure token embedding weights are still tied if needed @@ -2185,6 +2268,7 @@ def _load_pretrained_model( offload_folder=None, offload_state_dict=None, dtype=None, + load_in_8bit=False, ): if device_map is not None and "disk" in device_map.values(): if offload_folder is None: @@ -2250,7 +2334,10 @@ def _fix_key(key): key = ".".join(key.split(".")[1:]) param = model_state_dict[key] if param.device == torch.device("meta"): - set_module_tensor_to_device(model, key, "cpu", torch.empty(*param.size())) + if not load_in_8bit: + set_module_tensor_to_device(model, key, "cpu", torch.empty(*param.size())) + else: + set_module_8bit_tensor_to_device(model, key, "cpu", torch.empty(*param.size())) # retrieve unintialized modules and initialize before maybe overriding that with the pretrained weights. if _fast_init: @@ -2359,6 +2446,7 @@ def _find_mismatched_keys( state_dict_folder=state_dict_folder, state_dict_index=state_dict_index, dtype=dtype, + load_in_8bit=load_in_8bit, ) error_msgs += new_error_msgs else: diff --git a/src/transformers/utils/bitsandbytes.py b/src/transformers/utils/bitsandbytes.py new file mode 100644 index 00000000000000..ee4e52d421fd09 --- /dev/null +++ b/src/transformers/utils/bitsandbytes.py @@ -0,0 +1,142 @@ +from transformers.utils import is_accelerate_available, is_bitsandbytes_available + + +if is_bitsandbytes_available(): + import torch + import torch.nn as nn + + import bitsandbytes as bnb + +if is_accelerate_available(): + from accelerate import init_empty_weights + + +def set_module_8bit_tensor_to_device(module, tensor_name, device, value=None): + """ + A helper function to set a given tensor (parameter of buffer) of a module on a specific device (note that doing + `param.to(device)` creates a new tensor not linked to the parameter, which is why we need this function). The + function is adapted from `set_module_tensor_to_device` function from accelerate that is adapted to support the + class `Int8Params` from `bitsandbytes`. + + Args: + module (`torch.nn.Module`): + The module in which the tensor we want to move lives. + tensor_name (`str`): + The full name of the parameter/buffer. + device (`int`, `str` or `torch.device`): + The device on which to set the tensor. + value (`torch.Tensor`, *optional*): + The value of the tensor (useful when going from the meta device to any other device). + """ + # Recurse if needed + if "." in tensor_name: + splits = tensor_name.split(".") + for split in splits[:-1]: + new_module = getattr(module, split) + if new_module is None: + raise ValueError(f"{module} has no attribute {split}.") + module = new_module + tensor_name = splits[-1] + + if tensor_name not in module._parameters and tensor_name not in module._buffers: + raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.") + is_buffer = tensor_name in module._buffers + old_value = getattr(module, tensor_name) + + if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None: + raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.") + + if is_buffer: + has_fp16_weights = None + else: + has_fp16_weights = getattr(module._parameters[tensor_name], "has_fp16_weights", None) + + if has_fp16_weights is not None: + param = module._parameters[tensor_name] + if param.device.type != "cuda": + if value is None: + new_value = old_value.to(device) + elif isinstance(value, torch.Tensor): + new_value = value.to("cpu") + if value.dtype == torch.int8: + raise ValueError( + "You cannot load weights that are saved in int8 using `load_in_8bit=True`, make sure you are", + " using `load_in_8bit=True` on float32/float16/bfloat16 weights.", + ) + else: + new_value = torch.tensor(value, device="cpu") + new_value = bnb.nn.Int8Params(new_value, requires_grad=False, has_fp16_weights=has_fp16_weights).to(device) + module._parameters[tensor_name] = new_value + else: + if value is None: + new_value = old_value.to(device) + elif isinstance(value, torch.Tensor): + new_value = value.to(device) + else: + new_value = torch.tensor(value, device=device) + + if is_buffer: + module._buffers[tensor_name] = new_value + else: + new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad) + module._parameters[tensor_name] = new_value + + +def replace_8bit_linear(model, threshold=6.0, modules_to_not_convert="lm_head"): + """ + A helper function to replace all `torch.nn.Linear` modules by `bnb.nn.Linear8bit` modules from the `bitsandbytes` + library. This will enable running your models using mixed int8 precision as described by the paper `GPT3.int8(): + 8-bit Matrix Multiplication for Transformers at Scale`. Make sure `bitsandbytes` compiled with the correct CUDA + version of your hardware is installed before running this function. `pip install -i https://test.pypi.org/simple/ + bitsandbytes` + + The function will be run recursively and replace all `torch.nn.Linear` modules except for the `lm_head` that should + be kept as a `torch.nn.Linear` module. The replacement is done under `init_empty_weights` context manager so no + CPU/GPU memory is required to run this function. Int8 mixed-precision matrix decomposition works by separating a + matrix multiplication into two streams: (1) and systematic feature outlier stream matrix multiplied in fp16 + (0.01%), (2) a regular stream of int8 matrix multiplication (99.9%). With this method, int8 inference with no + predictive degradation is possible for very large models (>=176B parameters). + + Parameters: + model (`torch.nn.Module`): + Input model or `torch.nn.Module` as the function is run recursively. + threshold (`float`, *optional*, defaults to 6.0): + `int8_threshold` for outlier detection as described in the formentioned paper. This parameters is set to + `6.0` as described by the paper. + modules_to_not_convert (`str`, *optional*, defaults to `lm_head`): + Name of the module to not convert in `Linear8bitLt`. In practice we keep the `lm_head` in full precision + for numerical stability reasons. + """ + for name, module in model.named_children(): + if len(list(module.children())) > 0: + replace_8bit_linear(module, threshold, modules_to_not_convert) + + if isinstance(module, nn.Linear) and name != modules_to_not_convert: + with init_empty_weights(): + model._modules[name] = bnb.nn.Linear8bitLt( + module.in_features, + module.out_features, + module.bias is not None, + has_fp16_weights=False, + threshold=threshold, + ) + return model + + +def get_key_to_not_convert(model): + r""" + An utility function to get the key of the module to keep in full precision if any For example for CausalLM modules + we may want to keep the lm_head in full precision for numerical stability reasons. + + Parameters: + model (`torch.nn.Module`): + Input model + """ + # Ignore this for base models (BertModel, GPT2Model, etc.) + if not hasattr(model, model.base_model_prefix): + return "" + + # otherwise they have an attached head + list_modules = list(model.named_parameters()) + last_name = list_modules[-1][0] + return last_name.split(".")[0] diff --git a/tests/mixed_int8/README.md b/tests/mixed_int8/README.md new file mode 100644 index 00000000000000..c0173bed7a6b7a --- /dev/null +++ b/tests/mixed_int8/README.md @@ -0,0 +1,37 @@ +# Testing mixed int8 quantization + +## Hardware requirements + +I am using a setup of 2 GPUs that are NVIDIA-Tesla T4 15GB + +## Virutal envs + +```conda create --name int8-testing python==3.8``` +```git clone https://github.com/younesbelkada/transformers.git && git checkout integration-8bit``` +```pip install -e ".[dev]"``` +```pip install -i https://test.pypi.org/simple/ bitsandbytes``` +```pip install git+https://github.com/huggingface/accelerate.git@e0212893ea6098cc0a7a3c7a6eb286a9104214c1``` + + +## Trobleshooting + +```conda create --name int8-testing python==3.8``` +```pip install -i https://test.pypi.org/simple/ bitsandbytes``` +```conda install pytorch torchvision torchaudio -c pytorch``` +```git clone https://github.com/younesbelkada/transformers.git && git checkout integration-8bit``` +```pip install -e ".[dev]"``` +```pip install git+https://github.com/huggingface/accelerate.git@b52b793ea8bac108ba61192eead3cf11ca02433d``` + +### Check driver settings: + +``` +nvcc --version +``` + +``` +ls -l $CONDA_PREFIX/lib/libcudart.so +``` + +### Recurrent bugs + +Sometimes you have to run a "dummy" inference pass when dealing with a multi-GPU setup. Checkout the ```test_multi_gpu_loading``` and the ```test_pipeline``` functions. \ No newline at end of file diff --git a/tests/mixed_int8/__init__.py b/tests/mixed_int8/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/tests/mixed_int8/test_mixed_int8.py b/tests/mixed_int8/test_mixed_int8.py new file mode 100644 index 00000000000000..0cd7ca16411c19 --- /dev/null +++ b/tests/mixed_int8/test_mixed_int8.py @@ -0,0 +1,215 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Team Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a clone of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import gc +import unittest + +from transformers import AutoModel, AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer, pipeline +from transformers.testing_utils import ( + is_torch_available, + require_accelerate, + require_bitsandbytes, + require_torch, + require_torch_gpu, + require_torch_multi_gpu, + slow, +) + + +if is_torch_available(): + import torch + + +@require_bitsandbytes +@require_accelerate +@require_torch +@require_torch_gpu +@slow +class BaseMixedInt8Test(unittest.TestCase): + # We keep the constants inside the init function and model loading inside setUp function + + # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected) + # Therefore here we use only bloom-1b3 to test our module + model_name = "bigscience/bloom-1b7" + + # Constant values + EXPECTED_RELATIVE_DIFFERENCE = ( + 1.540025 # This was obtained on a Quadro RTX 8000 so the number might slightly change + ) + + input_text = "Hello my name is" + EXPECTED_OUTPUT = "Hello my name is John.\nI am a friend of the family.\n" + MAX_NEW_TOKENS = 10 + + def setUp(self): + # Models and tokenizer + self.tokenizer = AutoTokenizer.from_pretrained(self.model_name) + + +class MixedInt8Test(BaseMixedInt8Test): + def setUp(self): + super().setUp() + + # Models and tokenizer + self.model_fp16 = AutoModelForCausalLM.from_pretrained(self.model_name, torch_dtype="auto", device_map="auto") + self.model_8bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") + + def tearDown(self): + r""" + TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to + avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 + """ + del self.model_fp16 + del self.model_8bit + + gc.collect() + torch.cuda.empty_cache() + + def test_memory_footprint(self): + r""" + A simple test to check if the model conversion has been done correctly by checking on the + memory footprint of the converted model and the class type of the linear layers of the converted models + """ + from bitsandbytes.nn import Int8Params + + mem_fp16 = self.model_fp16.get_memory_footprint() + mem_8bit = self.model_8bit.get_memory_footprint() + + self.assertAlmostEqual(mem_fp16 / mem_8bit, self.EXPECTED_RELATIVE_DIFFERENCE) + self.assertTrue(self.model_8bit.transformer.h[0].mlp.dense_4h_to_h.weight.__class__ == Int8Params) + + def test_generate_quality(self): + r""" + Test the generation quality of the quantized model and see that we are matching the expected output. + Given that we are operating on small numbers + the testing model is relatively small, we might not get + the same output across GPUs. So we'll generate few tokens (5-10) and check their output. + """ + encoded_input = self.tokenizer(self.input_text, return_tensors="pt") + output_sequences = self.model_8bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) + + self.assertEqual(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) + + +class MixedInt8ModelClassesTest(BaseMixedInt8Test): + def setUp(self): + super().setUp() + # model_name + self.model_name = "bigscience/bloom-560m" + # Models and tokenizer + self.base_model = AutoModel.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") + self.sequence_model = AutoModelForSequenceClassification.from_pretrained( + self.model_name, load_in_8bit=True, device_map="auto" + ) + self.model_8bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") + + def tearDown(self): + r""" + TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to + avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 + """ + del self.base_model + del self.sequence_model + del self.model_8bit + + gc.collect() + torch.cuda.empty_cache() + + def test_correct_head_class(self): + r""" + A simple test to check if the last modules for some classes (AutoModelForCausalLM or SequenceClassification) + are kept in their native class. + """ + from bitsandbytes.nn import Int8Params + + # last param of a base model should be a linear8bit module + self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Int8Params) + + # Other heads should be nn.Parameter + self.assertTrue(self.model_8bit.lm_head.weight.__class__ == torch.nn.Parameter) + self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter) + + +class MixedInt8TestPipeline(BaseMixedInt8Test): + def setUp(self): + super().setUp() + + def tearDown(self): + r""" + TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to + avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 + """ + del self.pipe + + gc.collect() + torch.cuda.empty_cache() + + def test_pipeline(self): + r""" + The aim of this test is to verify that the mixed int8 is compatible with `pipeline` from transformers. Since + we used pipline for inference speed benchmarking we want to make sure that this feature does not break anything + on pipline. + """ + # self._clear_cuda_cache() + self.pipe = pipeline( + "text-generation", + model=self.model_name, + model_kwargs={"device_map": "auto", "load_in_8bit": True}, + max_new_tokens=self.MAX_NEW_TOKENS, + ) + + # Real second forward pass + pipeline_output = self.pipe(self.input_text) + self.assertEqual(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUT) + + +@require_torch_multi_gpu +class MixedInt8TestMultiGpu(BaseMixedInt8Test): + def setUp(self): + super().setUp() + + def test_multi_gpu_loading(self): + r""" + This tests that the model has been loaded and can be used correctly on a multi-GPU setup. + Let's just try to load a model on 2 GPUs and see if it works. The model we test has ~2GB of total, 3GB should suffice + """ + + memory_mapping = {0: "1GB", 1: "2GB"} + model_parallel = AutoModelForCausalLM.from_pretrained( + self.model_name, load_in_8bit=True, max_memory=memory_mapping, device_map="auto" + ) + + def get_list_devices(model): + list_devices = [] + for _, module in model.named_children(): + if len(list(module.children())) > 0: + list_devices.extend(get_list_devices(module)) + else: + # Do a try except since we can encounter Dropout modules that does not + # have any device set + try: + list_devices.append(next(module.parameters()).device.index) + except BaseException: + continue + return list_devices + + list_devices = get_list_devices(model_parallel) + # Check that we have dispatched the model into 2 separate devices + self.assertTrue((1 in list_devices) and (0 in list_devices)) + + # Check that inference pass works on the model + encoded_input = self.tokenizer(self.input_text, return_tensors="pt") + + # Second real batch + output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) + self.assertEqual(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) diff --git a/utils/tests_fetcher.py b/utils/tests_fetcher.py index 329d248de3c089..ba122f43f805db 100644 --- a/utils/tests_fetcher.py +++ b/utils/tests_fetcher.py @@ -465,6 +465,7 @@ def module_to_test_file(module_fname): "tests/sagemaker/test_single_node_gpu.py", # SageMaker test "tests/sagemaker/test_multi_node_model_parallel.py", # SageMaker test "tests/sagemaker/test_multi_node_data_parallel.py", # SageMaker test + "tests/mixed_int8/test_mixed_int8.py", # Mixed-int8 bitsandbytes test ] From 34aad0dac000508015d09ed7cf7c88adb5a0e308 Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Wed, 10 Aug 2022 12:57:21 +0100 Subject: [PATCH 062/539] TF: XLA-trainable DeBERTa v2 (#18546) * fix deberta issues * add different code paths for gpu and tpu * shorter gpu take along axis * Stable Dropout without tf cond * variable must be float --- .../models/deberta/modeling_tf_deberta.py | 49 +++++++------- .../deberta_v2/modeling_tf_deberta_v2.py | 67 ++++++++++--------- 2 files changed, 62 insertions(+), 54 deletions(-) diff --git a/src/transformers/models/deberta/modeling_tf_deberta.py b/src/transformers/models/deberta/modeling_tf_deberta.py index 1d8c01e24acda0..edb9b2b0855532 100644 --- a/src/transformers/models/deberta/modeling_tf_deberta.py +++ b/src/transformers/models/deberta/modeling_tf_deberta.py @@ -101,27 +101,6 @@ def call(self, inputs: tf.Tensor, mask: tf.Tensor): return output -def get_mask(input, dropout): - mask = tf.cast( - 1 - tf.compat.v1.distributions.Bernoulli(probs=1 - dropout).sample(sample_shape=shape_list(input)), tf.bool - ) - return mask, dropout - - -@tf.custom_gradient -def TFDebertaXDropout(input, local_ctx): - mask, dropout = get_mask(input, local_ctx) - scale = tf.convert_to_tensor(1.0 / (1 - dropout), dtype=tf.float32) - input = tf.cond(dropout > 0, lambda: tf.where(mask, 0.0, input) * scale, lambda: input) - - def custom_grad(upstream_grad): - return tf.cond( - scale > 1, lambda: (tf.where(mask, 0.0, upstream_grad) * scale, None), lambda: (upstream_grad, None) - ) - - return input, custom_grad - - class TFDebertaStableDropout(tf.keras.layers.Layer): """ Optimized dropout module for stabilizing the training @@ -132,11 +111,33 @@ class TFDebertaStableDropout(tf.keras.layers.Layer): def __init__(self, drop_prob, **kwargs): super().__init__(**kwargs) - self.drop_prob = tf.convert_to_tensor(drop_prob, dtype=tf.float32) + self.drop_prob = drop_prob + + @tf.custom_gradient + def xdropout(self, inputs): + """ + Applies dropout to the inputs, as vanilla dropout, but also scales the remaining elements up by 1/drop_prob. + """ + mask = tf.cast( + 1 + - tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)), + tf.bool, + ) + scale = tf.convert_to_tensor(1.0 / (1 - self.drop_prob), dtype=tf.float32) + if self.drop_prob > 0: + inputs = tf.where(mask, 0.0, inputs) * scale + + def grad(upstream): + if self.drop_prob > 0: + return tf.where(mask, 0.0, upstream) * scale + else: + return upstream + + return inputs, grad def call(self, inputs: tf.Tensor, training: tf.Tensor = False): - if training and self.drop_prob > 0: - return TFDebertaXDropout(inputs, self.drop_prob) + if training: + return self.xdropout(inputs) return inputs diff --git a/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py b/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py index aabb3b2d380ea1..fa9a202427e5e9 100644 --- a/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py +++ b/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py @@ -102,29 +102,6 @@ def call(self, inputs: tf.Tensor, mask: tf.Tensor): return output -# Copied from transformers.models.deberta.modeling_tf_deberta.get_mask -def get_mask(input, dropout): - mask = tf.cast( - 1 - tf.compat.v1.distributions.Bernoulli(probs=1 - dropout).sample(sample_shape=shape_list(input)), tf.bool - ) - return mask, dropout - - -@tf.custom_gradient -# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaXDropout -def TFDebertaV2XDropout(input, local_ctx): - mask, dropout = get_mask(input, local_ctx) - scale = tf.convert_to_tensor(1.0 / (1 - dropout), dtype=tf.float32) - input = tf.cond(dropout > 0, lambda: tf.where(mask, 0.0, input) * scale, lambda: input) - - def custom_grad(upstream_grad): - return tf.cond( - scale > 1, lambda: (tf.where(mask, 0.0, upstream_grad) * scale, None), lambda: (upstream_grad, None) - ) - - return input, custom_grad - - # Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaStableDropout with Deberta->DebertaV2 class TFDebertaV2StableDropout(tf.keras.layers.Layer): """ @@ -136,11 +113,33 @@ class TFDebertaV2StableDropout(tf.keras.layers.Layer): def __init__(self, drop_prob, **kwargs): super().__init__(**kwargs) - self.drop_prob = tf.convert_to_tensor(drop_prob, dtype=tf.float32) + self.drop_prob = drop_prob + + @tf.custom_gradient + def xdropout(self, inputs): + """ + Applies dropout to the inputs, as vanilla dropout, but also scales the remaining elements up by 1/drop_prob. + """ + mask = tf.cast( + 1 + - tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)), + tf.bool, + ) + scale = tf.convert_to_tensor(1.0 / (1 - self.drop_prob), dtype=tf.float32) + if self.drop_prob > 0: + inputs = tf.where(mask, 0.0, inputs) * scale + + def grad(upstream): + if self.drop_prob > 0: + return tf.where(mask, 0.0, upstream) * scale + else: + return upstream + + return inputs, grad def call(self, inputs: tf.Tensor, training: tf.Tensor = False): - if training and self.drop_prob > 0: - return TFDebertaV2XDropout(inputs, self.drop_prob) + if training: + return self.xdropout(inputs) return inputs @@ -525,10 +524,18 @@ def pos_dynamic_expand(pos_index, p2c_att, key_layer): def take_along_axis(x, indices): # Only a valid port of np.take_along_axis when the gather axis is -1 - flat_x = tf.reshape(x, (-1, x.shape[-1])) - flat_indices = tf.reshape(indices, (-1, indices.shape[-1])) - gathered = tf.gather(flat_x, flat_indices, batch_dims=1) - gathered = tf.reshape(gathered, indices.shape) + # TPU + gathers and reshapes don't go along well -- see https://github.com/huggingface/transformers/issues/18239 + if isinstance(tf.distribute.get_strategy(), tf.distribute.TPUStrategy): + # [B, S, P] -> [B, S, P, D] + one_hot_indices = tf.one_hot(indices, depth=x.shape[-1], dtype=x.dtype) + + # if we ignore the first two dims, this is equivalent to multiplying a matrix (one hot) by a vector (x) + # grossly abusing notation: [B, S, P, D] . [B, S, D] = [B, S, P] + gathered = tf.einsum("ijkl,ijl->ijk", one_hot_indices, x) + + # GPUs, on the other hand, prefer gathers instead of large one-hot+matmuls + else: + gathered = tf.gather(x, indices, batch_dims=2) return gathered From d7e2d7b40b1070cddfe878e13705725f49a2cf1f Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Wed, 10 Aug 2022 08:00:18 -0400 Subject: [PATCH 063/539] Preserve hub-related kwargs in AutoModel.from_pretrained (#18545) * Preserve hub-related kwargs in AutoModel.from_pretrained * Fix tests * Remove debug statement --- src/transformers/models/auto/auto_factory.py | 29 +++++++++++++++---- .../models/auto/configuration_auto.py | 6 ++-- 2 files changed, 27 insertions(+), 8 deletions(-) diff --git a/src/transformers/models/auto/auto_factory.py b/src/transformers/models/auto/auto_factory.py index b7d8f66c339dd4..b412f14157f1c3 100644 --- a/src/transformers/models/auto/auto_factory.py +++ b/src/transformers/models/auto/auto_factory.py @@ -419,9 +419,24 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): config = kwargs.pop("config", None) trust_remote_code = kwargs.pop("trust_remote_code", False) kwargs["_from_auto"] = True + hub_kwargs_names = [ + "cache_dir", + "force_download", + "local_files_only", + "proxies", + "resume_download", + "revision", + "subfolder", + "use_auth_token", + ] + hub_kwargs = {name: kwargs.pop(name) for name in hub_kwargs_names if name in kwargs} if not isinstance(config, PretrainedConfig): config, kwargs = AutoConfig.from_pretrained( - pretrained_model_name_or_path, return_unused_kwargs=True, trust_remote_code=trust_remote_code, **kwargs + pretrained_model_name_or_path, + return_unused_kwargs=True, + trust_remote_code=trust_remote_code, + **hub_kwargs, + **kwargs, ) if hasattr(config, "auto_map") and cls.__name__ in config.auto_map: if not trust_remote_code: @@ -430,7 +445,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): "on your local machine. Make sure you have read the code there to avoid malicious use, then set " "the option `trust_remote_code=True` to remove this error." ) - if kwargs.get("revision", None) is None: + if hub_kwargs.get("revision", None) is None: logger.warning( "Explicitly passing a `revision` is encouraged when loading a model with custom code to ensure " "no malicious code has been contributed in a newer revision." @@ -438,12 +453,16 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): class_ref = config.auto_map[cls.__name__] module_file, class_name = class_ref.split(".") model_class = get_class_from_dynamic_module( - pretrained_model_name_or_path, module_file + ".py", class_name, **kwargs + pretrained_model_name_or_path, module_file + ".py", class_name, **hub_kwargs, **kwargs + ) + return model_class.from_pretrained( + pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs ) - return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs) elif type(config) in cls._model_mapping.keys(): model_class = _get_model_class(config, cls._model_mapping) - return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs) + return model_class.from_pretrained( + pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs + ) raise ValueError( f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n" f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}." diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index d8ecbb49e64f29..c65a2762a00029 100644 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -728,7 +728,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): kwargs["_from_auto"] = True kwargs["name_or_path"] = pretrained_model_name_or_path trust_remote_code = kwargs.pop("trust_remote_code", False) - config_dict, _ = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) + config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) if "auto_map" in config_dict and "AutoConfig" in config_dict["auto_map"]: if not trust_remote_code: raise ValueError( @@ -749,13 +749,13 @@ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): return config_class.from_pretrained(pretrained_model_name_or_path, **kwargs) elif "model_type" in config_dict: config_class = CONFIG_MAPPING[config_dict["model_type"]] - return config_class.from_dict(config_dict, **kwargs) + return config_class.from_dict(config_dict, **unused_kwargs) else: # Fallback: use pattern matching on the string. # We go from longer names to shorter names to catch roberta before bert (for instance) for pattern in sorted(CONFIG_MAPPING.keys(), key=len, reverse=True): if pattern in str(pretrained_model_name_or_path): - return CONFIG_MAPPING[pattern].from_dict(config_dict, **kwargs) + return CONFIG_MAPPING[pattern].from_dict(config_dict, **unused_kwargs) raise ValueError( f"Unrecognized model in {pretrained_model_name_or_path}. " From 6eb51450fa2a440a45e02b29f01e4f2aa4f70a4d Mon Sep 17 00:00:00 2001 From: Matt Date: Wed, 10 Aug 2022 11:49:51 -0400 Subject: [PATCH 064/539] TF Examples Rewrite (#18451) * Finished QA example * Dodge a merge conflict * Update text classification and LM examples * Update NER example * New Keras metrics WIP, fix NER example * Update NER example * Update MC, summarization and translation examples * Add XLA warnings when shapes are variable * Make sure batch_size is consistently scaled by num_replicas * Add PushToHubCallback to all models * Add docs links for KerasMetricCallback * Add docs links for prepare_tf_dataset and jit_compile * Correct inferred model names * Don't assume the dataset has 'lang' * Don't assume the dataset has 'lang' * Write metrics in text classification * Add 'framework' to TrainingArguments and TFTrainingArguments * Export metrics in all examples and add tests * Fix training args for Flax * Update command line args for translation test * make fixup * Fix accidentally running other tests in fp16 * Remove do_train/do_eval from run_clm.py * Remove do_train/do_eval from run_mlm.py * Add tensorflow tests to circleci * Fix circleci * Update examples/tensorflow/language-modeling/run_mlm.py Co-authored-by: Joao Gante * Update examples/tensorflow/test_tensorflow_examples.py Co-authored-by: Joao Gante * Update examples/tensorflow/translation/run_translation.py Co-authored-by: Joao Gante * Update examples/tensorflow/token-classification/run_ner.py Co-authored-by: Joao Gante * Fix save path for tests * Fix some model card kwargs * Explain the magical -1000 * Actually enable tests this time * Skip text classification PR until we fix shape inference * make fixup Co-authored-by: Joao Gante --- .circleci/config.yml | 67 ++++ examples/tensorflow/_tests_requirements.txt | 25 ++ .../tensorflow/language-modeling/run_clm.py | 159 ++++++--- .../tensorflow/language-modeling/run_mlm.py | 140 +++++--- .../tensorflow/multiple-choice/run_swag.py | 132 +++++--- .../tensorflow/question-answering/run_qa.py | 187 ++++++++--- .../summarization/run_summarization.py | 278 ++++++++------- .../tensorflow/test_tensorflow_examples.py | 295 ++++++++++++++++ .../text-classification/run_glue.py | 135 +++++--- .../run_text_classification.py | 141 +++++--- .../token-classification/run_ner.py | 213 ++++++------ .../tensorflow/translation/run_translation.py | 317 ++++++++++-------- src/transformers/optimization_tf.py | 16 +- src/transformers/training_args.py | 43 +-- src/transformers/training_args_tf.py | 4 +- 15 files changed, 1491 insertions(+), 661 deletions(-) create mode 100644 examples/tensorflow/_tests_requirements.txt create mode 100644 examples/tensorflow/test_tensorflow_examples.py diff --git a/.circleci/config.yml b/.circleci/config.yml index 83ee65248e9cac..666505ab3b4389 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -658,6 +658,71 @@ jobs: - store_artifacts: path: ~/transformers/reports + run_examples_tensorflow: + working_directory: ~/transformers + docker: + - image: cimg/python:3.7.12 + environment: + OMP_NUM_THREADS: 1 + TRANSFORMERS_IS_CI: yes + PYTEST_TIMEOUT: 120 + resource_class: xlarge + parallelism: 1 + steps: + - checkout + - restore_cache: + keys: + - v0.5-tensorflow_examples-{{ checksum "setup.py" }} + - v0.5-{{ checksum "setup.py" }} + - run: pip install --upgrade pip + - run: pip install .[sklearn,tensorflow,sentencepiece,testing] + - run: pip install -r examples/tensorflow/_tests_requirements.txt + - save_cache: + key: v0.5-tensorflow_examples-{{ checksum "setup.py" }} + paths: + - '~/.cache/pip' + - run: python utils/tests_fetcher.py --filters examples tests | tee test_preparation.txt + - store_artifacts: + path: ~/transformers/test_preparation.txt + - run: | + if [ -f test_list.txt ]; then + python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -s --make-reports=examples_tensorflow ./examples/tensorflow/ | tee tests_output.txt + fi + - store_artifacts: + path: ~/transformers/tensorflow_examples_output.txt + - store_artifacts: + path: ~/transformers/reports + + run_examples_tensorflow_all: + working_directory: ~/transformers + docker: + - image: cimg/python:3.7.12 + environment: + OMP_NUM_THREADS: 1 + TRANSFORMERS_IS_CI: yes + PYTEST_TIMEOUT: 120 + resource_class: xlarge + parallelism: 1 + steps: + - checkout + - restore_cache: + keys: + - v0.5-tensorflow_examples-{{ checksum "setup.py" }} + - v0.5-{{ checksum "setup.py" }} + - run: pip install --upgrade pip + - run: pip install .[sklearn,tensorflow,sentencepiece,testing] + - run: pip install -r examples/tensorflow/_tests_requirements.txt + - save_cache: + key: v0.5-tensorflow_examples-{{ checksum "setup.py" }} + paths: + - '~/.cache/pip' + - run: | + TRANSFORMERS_IS_CI=1 python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -s --make-reports=examples_tensorflow ./examples/tensorflow/ | tee examples_output.txt + - store_artifacts: + path: ~/transformers/tensorflow_examples_output.txt + - store_artifacts: + path: ~/transformers/reports + run_examples_flax: working_directory: ~/transformers docker: @@ -1000,6 +1065,7 @@ workflows: - check_code_quality - check_repository_consistency - run_examples_torch + - run_examples_tensorflow - run_examples_flax - run_tests_custom_tokenizers - run_tests_torch_and_tf @@ -1022,6 +1088,7 @@ workflows: - main jobs: - run_examples_torch_all + - run_examples_tensorflow_all - run_examples_flax_all - run_tests_torch_and_tf_all - run_tests_torch_and_flax_all diff --git a/examples/tensorflow/_tests_requirements.txt b/examples/tensorflow/_tests_requirements.txt new file mode 100644 index 00000000000000..37e37e35259176 --- /dev/null +++ b/examples/tensorflow/_tests_requirements.txt @@ -0,0 +1,25 @@ +tensorflow +tensorboard +scikit-learn +seqeval +psutil +sacrebleu >= 1.4.12 +git+https://github.com/huggingface/accelerate@main#egg=accelerate +rouge-score +tensorflow_datasets +matplotlib +git-python==1.0.3 +faiss-cpu +streamlit +elasticsearch +nltk +pandas +datasets >= 1.13.3 +fire +pytest +conllu +sentencepiece != 0.1.92 +protobuf +jiwer +librosa +evaluate >= 0.2.0 diff --git a/examples/tensorflow/language-modeling/run_clm.py b/examples/tensorflow/language-modeling/run_clm.py index 3f12683d10d997..cbe2f54f22731d 100755 --- a/examples/tensorflow/language-modeling/run_clm.py +++ b/examples/tensorflow/language-modeling/run_clm.py @@ -22,6 +22,8 @@ """ # You can also adapt this script on your own clm task. Pointers for this are left as comments. +import json + # region Imports import logging import math @@ -46,8 +48,8 @@ TF_MODEL_FOR_CAUSAL_LM_MAPPING, AutoConfig, AutoTokenizer, - DefaultDataCollator, HfArgumentParser, + PushToHubCallback, TFAutoModelForCausalLM, TFTrainingArguments, create_optimizer, @@ -205,21 +207,6 @@ def __post_init__(self): assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." -# endregion - -# region Helper classes -class SavePretrainedCallback(tf.keras.callbacks.Callback): - # Hugging Face models have a save_pretrained() method that saves both the weights and the necessary - # metadata to allow them to be loaded as a pretrained model in future. This is a simple Keras callback - # that saves the model with this method after each epoch. - def __init__(self, output_dir, **kwargs): - super().__init__() - self.output_dir = output_dir - - def on_epoch_end(self, epoch, logs=None): - self.model.save_pretrained(self.output_dir) - - # endregion @@ -299,6 +286,7 @@ def main(): raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, + cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) if "validation" not in raw_datasets.keys(): @@ -306,12 +294,14 @@ def main(): data_args.dataset_name, data_args.dataset_config_name, split=f"train[:{data_args.validation_split_percentage}%]", + cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) raw_datasets["train"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"train[{data_args.validation_split_percentage}%:]", + cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: @@ -321,16 +311,39 @@ def main(): data_files["train"] = data_args.train_file if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file - extension = data_args.train_file.split(".")[-1] + extension = ( + data_args.train_file.split(".")[-1] + if data_args.train_file is not None + else data_args.validation_file.split(".")[-1] + ) if extension == "txt": extension = "text" dataset_args["keep_linebreaks"] = data_args.keep_linebreaks raw_datasets = load_dataset( extension, data_files=data_files, + cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, **dataset_args, ) + # If no validation data is there, validation_split_percentage will be used to divide the dataset. + if "validation" not in raw_datasets.keys(): + raw_datasets["validation"] = load_dataset( + extension, + data_files=data_files, + split=f"train[:{data_args.validation_split_percentage}%]", + cache_dir=model_args.cache_dir, + use_auth_token=True if model_args.use_auth_token else None, + **dataset_args, + ) + raw_datasets["train"] = load_dataset( + extension, + data_files=data_files, + split=f"train[{data_args.validation_split_percentage}%:]", + cache_dir=model_args.cache_dir, + use_auth_token=True if model_args.use_auth_token else None, + **dataset_args, + ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # endregion @@ -446,7 +459,7 @@ def group_texts(examples): eval_dataset = eval_dataset.select(range(max_eval_samples)) # Log a few random samples from the training set: - for index in random.sample(range(len(train_dataset)), 3): + for index in random.sample(range(len(train_dataset)), min(3, len(train_dataset))): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # endregion @@ -465,44 +478,88 @@ def group_texts(examples): # region TF Dataset preparation num_replicas = training_args.strategy.num_replicas_in_sync - data_collator = DefaultDataCollator(return_tensors="tf") options = tf.data.Options() options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF - tf_train_dataset = train_dataset.to_tf_dataset( - # labels are passed as input, as we will use the model's internal loss - columns=[col for col in train_dataset.features if col != "special_tokens_mask"], + # model.prepare_tf_dataset() wraps a Hugging Face dataset in a tf.data.Dataset which is ready to use in + # training. This is the recommended way to use a Hugging Face dataset when training with Keras. You can also + # use the lower-level dataset.to_tf_dataset() method, but you will have to specify things like column names + # yourself if you use this method, whereas they are automatically inferred from the model input names when + # using model.prepare_tf_dataset() + # For more info see the docs: + # https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset + # https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.to_tf_dataset + + tf_train_dataset = model.prepare_tf_dataset( + train_dataset, shuffle=True, batch_size=num_replicas * training_args.per_device_train_batch_size, - collate_fn=data_collator, - drop_remainder=True, ).with_options(options) - tf_eval_dataset = eval_dataset.to_tf_dataset( - # labels are passed as input, as we will use the model's internal loss - columns=[col for col in eval_dataset.features if col != "special_tokens_mask"], + tf_eval_dataset = model.prepare_tf_dataset( + eval_dataset, shuffle=False, - batch_size=num_replicas * training_args.per_device_train_batch_size, - collate_fn=data_collator, + batch_size=num_replicas * training_args.per_device_eval_batch_size, drop_remainder=True, ).with_options(options) # endregion # region Optimizer and loss - batches_per_epoch = len(train_dataset) // (num_replicas * training_args.per_device_train_batch_size) + num_train_steps = len(tf_train_dataset) * int(training_args.num_train_epochs) + if training_args.warmup_steps > 0: + num_warmup_steps = training_args.warmup_steps + elif training_args.warmup_ratio > 0: + num_warmup_steps = int(num_train_steps * training_args.warmup_ratio) + else: + num_warmup_steps = 0 + # Bias and layernorm weights are automatically excluded from the decay optimizer, lr_schedule = create_optimizer( init_lr=training_args.learning_rate, - num_train_steps=int(training_args.num_train_epochs * batches_per_epoch), - num_warmup_steps=training_args.warmup_steps, + num_train_steps=num_train_steps, + num_warmup_steps=num_warmup_steps, adam_beta1=training_args.adam_beta1, adam_beta2=training_args.adam_beta2, adam_epsilon=training_args.adam_epsilon, weight_decay_rate=training_args.weight_decay, + adam_global_clipnorm=training_args.max_grad_norm, ) # no user-specified loss = will use the model internal loss - model.compile(optimizer=optimizer) + model.compile(optimizer=optimizer, jit_compile=training_args.xla) + # endregion + + # region Preparing push_to_hub and model card + push_to_hub_model_id = training_args.push_to_hub_model_id + model_name = model_args.model_name_or_path.split("/")[-1] + if not push_to_hub_model_id: + if data_args.dataset_name is not None: + push_to_hub_model_id = f"{model_name}-finetuned-{data_args.dataset_name}" + else: + push_to_hub_model_id = f"{model_name}-finetuned-clm" + + model_card_kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-generation"} + if data_args.dataset_name is not None: + model_card_kwargs["dataset_tags"] = data_args.dataset_name + if data_args.dataset_config_name is not None: + model_card_kwargs["dataset_args"] = data_args.dataset_config_name + model_card_kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" + else: + model_card_kwargs["dataset"] = data_args.dataset_name + + if training_args.push_to_hub: + callbacks = [ + PushToHubCallback( + output_dir=training_args.output_dir, + model_id=push_to_hub_model_id, + organization=training_args.push_to_hub_organization, + token=training_args.push_to_hub_token, + tokenizer=tokenizer, + **model_card_kwargs, + ) + ] + else: + callbacks = [] # endregion # region Training and validation @@ -512,33 +569,45 @@ def group_texts(examples): logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}") logger.info(f" Total train batch size = {training_args.per_device_train_batch_size * num_replicas}") + # For long training runs, you may wish to use the PushToHub() callback here to save intermediate checkpoints + # to the Hugging Face Hub rather than just pushing the finished model. + # See https://huggingface.co/docs/transformers/main_classes/keras_callbacks#transformers.PushToHubCallback + history = model.fit( tf_train_dataset, validation_data=tf_eval_dataset, epochs=int(training_args.num_train_epochs), - steps_per_epoch=len(train_dataset) // (training_args.per_device_train_batch_size * num_replicas), - callbacks=[SavePretrainedCallback(output_dir=training_args.output_dir)], + callbacks=callbacks, ) + train_loss = history.history["loss"][-1] try: - train_perplexity = math.exp(history.history["loss"][-1]) + train_perplexity = math.exp(train_loss) except OverflowError: train_perplexity = math.inf + logger.info(f" Final train loss: {train_loss:.3f}") + logger.info(f" Final train perplexity: {train_perplexity:.3f}") + validation_loss = history.history["val_loss"][-1] try: - validation_perplexity = math.exp(history.history["val_loss"][-1]) + validation_perplexity = math.exp(validation_loss) except OverflowError: validation_perplexity = math.inf - logger.info(f" Final train loss: {history.history['loss'][-1]:.3f}") - logger.info(f" Final train perplexity: {train_perplexity:.3f}") - logger.info(f" Final validation loss: {history.history['val_loss'][-1]:.3f}") + logger.info(f" Final validation loss: {validation_loss:.3f}") logger.info(f" Final validation perplexity: {validation_perplexity:.3f}") - # endregion if training_args.output_dir is not None: - model.save_pretrained(training_args.output_dir) + output_eval_file = os.path.join(training_args.output_dir, "all_results.json") + results_dict = dict() + results_dict["train_loss"] = train_loss + results_dict["train_perplexity"] = train_perplexity + results_dict["eval_loss"] = validation_loss + results_dict["eval_perplexity"] = validation_perplexity + with open(output_eval_file, "w") as writer: + writer.write(json.dumps(results_dict)) + # endregion - if training_args.push_to_hub: - # You'll probably want to include some of your own metadata here! - model.push_to_hub() + if training_args.output_dir is not None and not training_args.push_to_hub: + # If we're not pushing to hub, at least save a local copy when we're done + model.save_pretrained(training_args.output_dir) if __name__ == "__main__": diff --git a/examples/tensorflow/language-modeling/run_mlm.py b/examples/tensorflow/language-modeling/run_mlm.py index b421ed8e669c15..43449a093411d4 100755 --- a/examples/tensorflow/language-modeling/run_mlm.py +++ b/examples/tensorflow/language-modeling/run_mlm.py @@ -22,9 +22,7 @@ """ # You can also adapt this script on your own mlm task. Pointers for this are left as comments. -# TODO Do multi-GPU and TPU tests and make sure the dataset length works as expected -# TODO Duplicate all changes over to the CLM script - +import json import logging import math import os @@ -50,6 +48,7 @@ AutoTokenizer, DataCollatorForLanguageModeling, HfArgumentParser, + PushToHubCallback, TFAutoModelForMaskedLM, TFTrainingArguments, create_optimizer, @@ -217,22 +216,6 @@ def __post_init__(self): # endregion -# region Helper classes -class SavePretrainedCallback(tf.keras.callbacks.Callback): - # Hugging Face models have a save_pretrained() method that saves both the weights and the necessary - # metadata to allow them to be loaded as a pretrained model in future. This is a simple Keras callback - # that saves the model with this method after each epoch. - def __init__(self, output_dir, **kwargs): - super().__init__() - self.output_dir = output_dir - - def on_epoch_end(self, epoch, logs=None): - self.model.save_pretrained(self.output_dir) - - -# endregion - - def main(): # region Argument Parsing parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments)) @@ -492,7 +475,7 @@ def group_texts(examples): eval_dataset = eval_dataset.select(range(max_eval_samples)) # Log a few random samples from the training set: - for index in random.sample(range(len(train_dataset)), 3): + for index in random.sample(range(len(train_dataset)), min(3, len(train_dataset))): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # endregion @@ -517,40 +500,88 @@ def group_texts(examples): options = tf.data.Options() options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF - tf_train_dataset = train_dataset.to_tf_dataset( - # labels are passed as input, as we will use the model's internal loss - columns=[col for col in train_dataset.features if col != "special_tokens_mask"] + ["labels"], + # model.prepare_tf_dataset() wraps a Hugging Face dataset in a tf.data.Dataset which is ready to use in + # training. This is the recommended way to use a Hugging Face dataset when training with Keras. You can also + # use the lower-level dataset.to_tf_dataset() method, but you will have to specify things like column names + # yourself if you use this method, whereas they are automatically inferred from the model input names when + # using model.prepare_tf_dataset() + # For more info see the docs: + # https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset + # https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.to_tf_dataset + + tf_train_dataset = model.prepare_tf_dataset( + train_dataset, shuffle=True, batch_size=num_replicas * training_args.per_device_train_batch_size, collate_fn=data_collator, - drop_remainder=True, ).with_options(options) - tf_eval_dataset = eval_dataset.to_tf_dataset( + tf_eval_dataset = model.prepare_tf_dataset( + eval_dataset, # labels are passed as input, as we will use the model's internal loss - columns=[col for col in eval_dataset.features if col != "special_tokens_mask"] + ["labels"], shuffle=False, - batch_size=num_replicas * training_args.per_device_train_batch_size, + batch_size=num_replicas * training_args.per_device_eval_batch_size, collate_fn=data_collator, drop_remainder=True, ).with_options(options) # endregion # region Optimizer and loss - batches_per_epoch = len(train_dataset) // (num_replicas * training_args.per_device_train_batch_size) + num_train_steps = len(tf_train_dataset) * int(training_args.num_train_epochs) + if training_args.warmup_steps > 0: + num_warmup_steps = training_args.warmup_steps + elif training_args.warmup_ratio > 0: + num_warmup_steps = int(num_train_steps * training_args.warmup_ratio) + else: + num_warmup_steps = 0 + # Bias and layernorm weights are automatically excluded from the decay optimizer, lr_schedule = create_optimizer( init_lr=training_args.learning_rate, - num_train_steps=int(training_args.num_train_epochs * batches_per_epoch), - num_warmup_steps=training_args.warmup_steps, + num_train_steps=num_train_steps, + num_warmup_steps=num_warmup_steps, adam_beta1=training_args.adam_beta1, adam_beta2=training_args.adam_beta2, adam_epsilon=training_args.adam_epsilon, weight_decay_rate=training_args.weight_decay, + adam_global_clipnorm=training_args.max_grad_norm, ) # no user-specified loss = will use the model internal loss - model.compile(optimizer=optimizer) + model.compile(optimizer=optimizer, jit_compile=training_args.xla, run_eagerly=True) + # endregion + + # region Preparing push_to_hub and model card + push_to_hub_model_id = training_args.push_to_hub_model_id + model_name = model_args.model_name_or_path.split("/")[-1] + if not push_to_hub_model_id: + if data_args.dataset_name is not None: + push_to_hub_model_id = f"{model_name}-finetuned-{data_args.dataset_name}" + else: + push_to_hub_model_id = f"{model_name}-finetuned-mlm" + + model_card_kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "fill-mask"} + if data_args.dataset_name is not None: + model_card_kwargs["dataset_tags"] = data_args.dataset_name + if data_args.dataset_config_name is not None: + model_card_kwargs["dataset_args"] = data_args.dataset_config_name + model_card_kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" + else: + model_card_kwargs["dataset"] = data_args.dataset_name + + if training_args.push_to_hub: + callbacks = [ + PushToHubCallback( + output_dir=training_args.output_dir, + model_id=push_to_hub_model_id, + organization=training_args.push_to_hub_organization, + token=training_args.push_to_hub_token, + tokenizer=tokenizer, + **model_card_kwargs, + ) + ] + else: + callbacks = [] # endregion # region Training and validation @@ -560,33 +591,46 @@ def group_texts(examples): logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}") logger.info(f" Total train batch size = {training_args.per_device_train_batch_size * num_replicas}") + # For long training runs, you may wish to use the PushToHub() callback here to save intermediate checkpoints + # to the Hugging Face Hub rather than just pushing the finished model. + # See https://huggingface.co/docs/transformers/main_classes/keras_callbacks#transformers.PushToHubCallback + history = model.fit( tf_train_dataset, validation_data=tf_eval_dataset, epochs=int(training_args.num_train_epochs), - steps_per_epoch=len(train_dataset) // (training_args.per_device_train_batch_size * num_replicas), - callbacks=[SavePretrainedCallback(output_dir=training_args.output_dir)], + callbacks=callbacks, ) + train_loss = history.history["loss"][-1] try: - train_perplexity = math.exp(history.history["loss"][-1]) + train_perplexity = math.exp(train_loss) except OverflowError: train_perplexity = math.inf - try: - validation_perplexity = math.exp(history.history["val_loss"][-1]) - except OverflowError: - validation_perplexity = math.inf - logger.warning(f" Final train loss: {history.history['loss'][-1]:.3f}") - logger.warning(f" Final train perplexity: {train_perplexity:.3f}") - logger.warning(f" Final validation loss: {history.history['val_loss'][-1]:.3f}") - logger.warning(f" Final validation perplexity: {validation_perplexity:.3f}") - # endregion + logger.info(f" Final train loss: {train_loss:.3f}") + logger.info(f" Final train perplexity: {train_perplexity:.3f}") + + validation_loss = history.history["val_loss"][-1] + try: + validation_perplexity = math.exp(validation_loss) + except OverflowError: + validation_perplexity = math.inf + logger.info(f" Final validation loss: {validation_loss:.3f}") + logger.info(f" Final validation perplexity: {validation_perplexity:.3f}") - if training_args.output_dir is not None: - model.save_pretrained(training_args.output_dir) + if training_args.output_dir is not None: + output_eval_file = os.path.join(training_args.output_dir, "all_results.json") + results_dict = dict() + results_dict["train_loss"] = train_loss + results_dict["train_perplexity"] = train_perplexity + results_dict["eval_loss"] = validation_loss + results_dict["eval_perplexity"] = validation_perplexity + with open(output_eval_file, "w") as writer: + writer.write(json.dumps(results_dict)) + # endregion - if training_args.push_to_hub: - # You'll probably want to append some of your own metadata here! - model.push_to_hub() + if training_args.output_dir is not None and not training_args.push_to_hub: + # If we're not pushing to hub, at least save a local copy when we're done + model.save_pretrained(training_args.output_dir) if __name__ == "__main__": diff --git a/examples/tensorflow/multiple-choice/run_swag.py b/examples/tensorflow/multiple-choice/run_swag.py index 6ba35bd0fd2023..2684500d248db9 100644 --- a/examples/tensorflow/multiple-choice/run_swag.py +++ b/examples/tensorflow/multiple-choice/run_swag.py @@ -18,6 +18,7 @@ """ # You can also adapt this script on your own multiple choice task. Pointers for this are left as comments. +import json import logging import os import sys @@ -38,6 +39,7 @@ AutoTokenizer, DefaultDataCollator, HfArgumentParser, + PushToHubCallback, TFAutoModelForMultipleChoice, TFTrainingArguments, create_optimizer, @@ -54,16 +56,6 @@ # region Helper classes and functions -class SavePretrainedCallback(tf.keras.callbacks.Callback): - # Hugging Face models have a save_pretrained() method that saves both the weights and the necessary - # metadata to allow them to be loaded as a pretrained model in future. This is a simple Keras callback - # that saves the model with this method after each epoch. - def __init__(self, output_dir, **kwargs): - super().__init__() - self.output_dir = output_dir - - def on_epoch_end(self, epoch, logs=None): - self.model.save_pretrained(self.output_dir) @dataclass @@ -391,7 +383,6 @@ def preprocess_function(examples): if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] - non_label_columns = [feature for feature in train_dataset.features if feature not in ("label", "labels")] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) @@ -407,8 +398,6 @@ def preprocess_function(examples): if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = raw_datasets["validation"] - if not training_args.do_train: - non_label_columns = [feature for feature in eval_dataset.features if feature not in ("label", "labels")] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) @@ -444,79 +433,120 @@ def preprocess_function(examples): num_replicas = training_args.strategy.num_replicas_in_sync total_train_batch_size = training_args.per_device_train_batch_size * num_replicas total_eval_batch_size = training_args.per_device_eval_batch_size * num_replicas + if training_args.do_train: - total_train_steps = (len(train_dataset) // total_train_batch_size) * int(training_args.num_train_epochs) + num_train_steps = (len(train_dataset) // total_train_batch_size) * int(training_args.num_train_epochs) + if training_args.warmup_steps > 0: + num_warmup_steps = training_args.warmup_steps + elif training_args.warmup_ratio > 0: + num_warmup_steps = int(num_train_steps * training_args.warmup_ratio) + else: + num_warmup_steps = 0 optimizer, lr_schedule = create_optimizer( - init_lr=training_args.learning_rate, num_train_steps=int(total_train_steps), num_warmup_steps=0 + init_lr=training_args.learning_rate, + num_train_steps=num_train_steps, + num_warmup_steps=num_warmup_steps, + adam_beta1=training_args.adam_beta1, + adam_beta2=training_args.adam_beta2, + adam_epsilon=training_args.adam_epsilon, + weight_decay_rate=training_args.weight_decay, + adam_global_clipnorm=training_args.max_grad_norm, ) else: - optimizer = "adam" # Just put anything in here, since we're not using it anyway - model.compile( - optimizer=optimizer, - loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), - metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name="accuracy")], - ) + optimizer = None + model.compile(optimizer=optimizer, metrics=["accuracy"], jit_compile=training_args.xla) + # endregion + + # region Preparing push_to_hub and model card + push_to_hub_model_id = training_args.push_to_hub_model_id + model_name = model_args.model_name_or_path.split("/")[-1] + if not push_to_hub_model_id: + push_to_hub_model_id = f"{model_name}-finetuned-multiplechoice" + + model_card_kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "multiple-choice"} + + if training_args.push_to_hub: + callbacks = [ + PushToHubCallback( + output_dir=training_args.output_dir, + model_id=push_to_hub_model_id, + organization=training_args.push_to_hub_organization, + token=training_args.push_to_hub_token, + tokenizer=tokenizer, + **model_card_kwargs, + ) + ] + else: + callbacks = [] # endregion # region Training + eval_metrics = None if training_args.do_train: - dataset_exclude_cols = set(non_label_columns + ["label"]) - tf_train_dataset = train_dataset.to_tf_dataset( - columns=[col for col in train_dataset.column_names if col not in dataset_exclude_cols], + dataset_options = tf.data.Options() + dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF + + # model.prepare_tf_dataset() wraps a Hugging Face dataset in a tf.data.Dataset which is ready to use in + # training. This is the recommended way to use a Hugging Face dataset when training with Keras. You can also + # use the lower-level dataset.to_tf_dataset() method, but you will have to specify things like column names + # yourself if you use this method, whereas they are automatically inferred from the model input names when + # using model.prepare_tf_dataset() + # For more info see the docs: + # https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset + # https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.to_tf_dataset + + tf_train_dataset = model.prepare_tf_dataset( + train_dataset, shuffle=True, batch_size=total_train_batch_size, collate_fn=data_collator, - drop_remainder=True, - # `label_cols` is needed for user-defined losses, such as in this example - label_cols="label" if "label" in train_dataset.column_names else None, - ) + ).with_options(dataset_options) if training_args.do_eval: - validation_data = eval_dataset.to_tf_dataset( - columns=[col for col in eval_dataset.column_names if col not in dataset_exclude_cols], + validation_data = model.prepare_tf_dataset( + eval_dataset, shuffle=False, batch_size=total_eval_batch_size, collate_fn=data_collator, drop_remainder=True, - # `label_cols` is needed for user-defined losses, such as in this example - label_cols="label" if "label" in eval_dataset.column_names else None, - ) + ).with_options(dataset_options) else: validation_data = None - model.fit( + history = model.fit( tf_train_dataset, validation_data=validation_data, epochs=int(training_args.num_train_epochs), - callbacks=[SavePretrainedCallback(output_dir=training_args.output_dir)], + callbacks=callbacks, ) + eval_metrics = {key: val[-1] for key, val in history.history.items()} # endregion # region Evaluation if training_args.do_eval and not training_args.do_train: - dataset_exclude_cols = set(non_label_columns + ["label"]) + dataset_options = tf.data.Options() + dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF # Do a standalone evaluation pass - tf_eval_dataset = eval_dataset.to_tf_dataset( - columns=[col for col in eval_dataset.column_names if col not in dataset_exclude_cols], + tf_eval_dataset = model.prepare_tf_dataset( + eval_dataset, shuffle=False, batch_size=total_eval_batch_size, collate_fn=data_collator, drop_remainder=True, - # `label_cols` is needed for user-defined losses, such as in this example - label_cols="label" if "label" in eval_dataset.column_names else None, - ) - model.evaluate(tf_eval_dataset) + ).with_options(dataset_options) + eval_results = model.evaluate(tf_eval_dataset) + eval_metrics = {"val_loss": eval_results[0], "val_accuracy": eval_results[1]} # endregion + if eval_metrics is not None and training_args.output_dir is not None: + output_eval_file = os.path.join(training_args.output_dir, "all_results.json") + with open(output_eval_file, "w") as writer: + writer.write(json.dumps(eval_metrics)) + # region Push to hub - if training_args.push_to_hub: - model.push_to_hub( - finetuned_from=model_args.model_name_or_path, - tasks="multiple-choice", - dataset_tags="swag", - dataset_args="regular", - dataset="SWAG", - language="en", - ) + + if training_args.output_dir is not None and not training_args.push_to_hub: + # If we're not pushing to hub, at least save a local copy when we're done + model.save_pretrained(training_args.output_dir) # endregion diff --git a/examples/tensorflow/question-answering/run_qa.py b/examples/tensorflow/question-answering/run_qa.py index 91293aefb35f55..7f53a9841509c7 100755 --- a/examples/tensorflow/question-answering/run_qa.py +++ b/examples/tensorflow/question-answering/run_qa.py @@ -18,6 +18,7 @@ """ # You can also adapt this script on your own question answering task. Pointers for this are left as comments. +import json import logging import os import sys @@ -33,13 +34,13 @@ from transformers import ( AutoConfig, AutoTokenizer, - DataCollatorWithPadding, - DefaultDataCollator, EvalPrediction, HfArgumentParser, PreTrainedTokenizerFast, + PushToHubCallback, TFAutoModelForQuestionAnswering, TFTrainingArguments, + create_optimizer, set_seed, ) from transformers.utils import CONFIG_NAME, TF2_WEIGHTS_NAME, check_min_version, send_example_telemetry @@ -609,7 +610,12 @@ def compute_metrics(p: EvalPrediction): # endregion with training_args.strategy.scope(): - # region Load model + + dataset_options = tf.data.Options() + dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF + num_replicas = training_args.strategy.num_replicas_in_sync + + # region Load model and prepare datasets if checkpoint is None: model_path = model_args.model_name_or_path else: @@ -621,71 +627,163 @@ def compute_metrics(p: EvalPrediction): revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) - optimizer = tf.keras.optimizers.Adam( - learning_rate=training_args.learning_rate, - beta_1=training_args.adam_beta1, - beta_2=training_args.adam_beta2, - epsilon=training_args.adam_epsilon, - clipnorm=training_args.max_grad_norm, - ) + if training_args.do_train: - # no user-specified loss = will use the model internal loss - model.compile(optimizer=optimizer) - # endregion + training_dataset = model.prepare_tf_dataset( + processed_datasets["train"], + shuffle=True, + batch_size=training_args.per_device_train_batch_size * num_replicas, + tokenizer=tokenizer, + ) + + training_dataset = training_dataset.with_options(dataset_options) + + num_train_steps = len(training_dataset) * training_args.num_train_epochs + if training_args.warmup_steps > 0: + num_warmup_steps = training_args.warmup_steps + elif training_args.warmup_ratio > 0: + num_warmup_steps = int(num_train_steps * training_args.warmup_ratio) + else: + num_warmup_steps = 0 + + optimizer, schedule = create_optimizer( + init_lr=training_args.learning_rate, + num_train_steps=len(training_dataset) * training_args.num_train_epochs, + num_warmup_steps=num_warmup_steps, + adam_beta1=training_args.adam_beta1, + adam_beta2=training_args.adam_beta2, + adam_epsilon=training_args.adam_epsilon, + weight_decay_rate=training_args.weight_decay, + adam_global_clipnorm=training_args.max_grad_norm, + ) + + # no user-specified loss = will use the model internal loss + model.compile(optimizer=optimizer, jit_compile=training_args.xla, metrics=["accuracy"]) - # region Training - if padding: - data_collator = DefaultDataCollator(return_tensors="tf") else: - data_collator = DataCollatorWithPadding(tokenizer, return_tensors="tf") - tensor_keys = ["attention_mask", "input_ids"] - label_keys = ["start_positions", "end_positions"] + model.compile(optimizer=None, jit_compile=training_args.xla, metrics=["accuracy"]) + training_dataset = None - if training_args.do_train: - # Make a tf.data.Dataset for this - training_dataset = processed_datasets["train"].to_tf_dataset( - # labels are passed as input, as we will use the model's internal loss - columns=tensor_keys + label_keys, - shuffle=True, - batch_size=training_args.per_device_train_batch_size, - collate_fn=data_collator, - drop_remainder=True, + if training_args.do_eval: + eval_dataset = model.prepare_tf_dataset( + processed_datasets["validation"], + shuffle=False, + batch_size=training_args.per_device_train_batch_size * num_replicas, + tokenizer=tokenizer, + ) + eval_dataset = eval_dataset.with_options(dataset_options) + else: + eval_dataset = None + + if training_args.do_predict: + predict_dataset = model.prepare_tf_dataset( + processed_datasets["test"], + shuffle=False, + batch_size=training_args.per_device_eval_batch_size * num_replicas, + tokenizer=tokenizer, ) - model.fit(training_dataset, epochs=int(training_args.num_train_epochs)) + predict_dataset = predict_dataset.with_options(dataset_options) + else: + predict_dataset = None + + # endregion + + # region Preparing push_to_hub and model card + push_to_hub_model_id = training_args.push_to_hub_model_id + model_name = model_args.model_name_or_path.split("/")[-1] + if not push_to_hub_model_id: + if data_args.dataset_name is not None: + push_to_hub_model_id = f"{model_name}-finetuned-{data_args.dataset_name}" + else: + push_to_hub_model_id = f"{model_name}-finetuned-question-answering" + + model_card_kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "question-answering"} + if data_args.dataset_name is not None: + model_card_kwargs["dataset_tags"] = data_args.dataset_name + if data_args.dataset_config_name is not None: + model_card_kwargs["dataset_args"] = data_args.dataset_config_name + model_card_kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" + else: + model_card_kwargs["dataset"] = data_args.dataset_name + + if training_args.push_to_hub: + callbacks = [ + PushToHubCallback( + output_dir=training_args.output_dir, + model_id=push_to_hub_model_id, + organization=training_args.push_to_hub_organization, + token=training_args.push_to_hub_token, + tokenizer=tokenizer, + **model_card_kwargs, + ) + ] + else: + callbacks = [] # endregion - # region Evaluation + # region Training and Evaluation + + if training_args.do_train: + # Note that the validation and test datasets have been processed in a different way to the + # training datasets in this example, and so they don't have the same label structure. + # As such, we don't pass them directly to Keras, but instead get model predictions to evaluate + # after training. + model.fit(training_dataset, epochs=int(training_args.num_train_epochs), callbacks=callbacks) + if training_args.do_eval: logger.info("*** Evaluation ***") - eval_inputs = { - "input_ids": tf.ragged.constant(processed_datasets["validation"]["input_ids"]).to_tensor(), - "attention_mask": tf.ragged.constant(processed_datasets["validation"]["attention_mask"]).to_tensor(), - } - eval_predictions = model.predict(eval_inputs) + + # In this example, we compute advanced metrics at the end of training, but + # if you'd like to compute metrics every epoch that are too complex to be written as + # standard Keras metrics, you can use our KerasMetricCallback. See + # https://huggingface.co/docs/transformers/main/en/main_classes/keras_callbacks + + eval_predictions = model.predict(eval_dataset) + if isinstance(eval_predictions.start_logits, tf.RaggedTensor): + # If predictions are RaggedTensor, we densify them. Since they are logits, padding with 0 is a bad idea! + # The reason is that a logit of 0 can often end up as quite a high probability value, sometimes even + # the highest probability in a sample. Instead, we use a large negative value, which ensures that the + # padding positions are correctly masked. + eval_start_logits = eval_predictions.start_logits.to_tensor(default_value=-1000).numpy() + eval_end_logits = eval_predictions.end_logits.to_tensor(default_value=-1000).numpy() + else: + eval_start_logits = eval_predictions.start_logits + eval_end_logits = eval_predictions.end_logits post_processed_eval = post_processing_function( datasets["validation"], processed_datasets["validation"], - (eval_predictions.start_logits, eval_predictions.end_logits), + (eval_start_logits, eval_end_logits), ) metrics = compute_metrics(post_processed_eval) logging.info("Evaluation metrics:") for metric, value in metrics.items(): logging.info(f"{metric}: {value:.3f}") + if training_args.output_dir is not None: + output_eval_file = os.path.join(training_args.output_dir, "all_results.json") + with open(output_eval_file, "w") as writer: + writer.write(json.dumps(metrics)) # endregion # region Prediction if training_args.do_predict: logger.info("*** Predict ***") - predict_inputs = { - "input_ids": tf.ragged.constant(processed_datasets["test"]["input_ids"]).to_tensor(), - "attention_mask": tf.ragged.constant(processed_datasets["test"]["attention_mask"]).to_tensor(), - } - test_predictions = model.predict(predict_inputs) + + test_predictions = model.predict(predict_dataset) + if isinstance(test_predictions.start_logits, tf.RaggedTensor): + # If predictions are RaggedTensor, we densify them. Since they are logits, padding with 0 is a bad idea! + # The reason is that a logit of 0 can often end up as quite a high probability value, sometimes even + # the highest probability in a sample. Instead, we use a large negative value, which ensures that the + # padding positions are correctly masked. + test_start_logits = test_predictions.start_logits.to_tensor(default_value=-1000).numpy() + test_end_logits = test_predictions.end_logits.to_tensor(default_value=-1000).numpy() + else: + test_start_logits = test_predictions.start_logits + test_end_logits = test_predictions.end_logits post_processed_test = post_processing_function( datasets["test"], processed_datasets["test"], - (test_predictions.start_logits, test_predictions.end_logits), + (test_start_logits, test_end_logits), ) metrics = compute_metrics(post_processed_test) @@ -694,8 +792,9 @@ def compute_metrics(p: EvalPrediction): logging.info(f"{metric}: {value:.3f}") # endregion - if training_args.push_to_hub: - model.push_to_hub() + if training_args.output_dir is not None and not training_args.push_to_hub: + # If we're not pushing to hub, at least save a local copy when we're done + model.save_pretrained(training_args.output_dir) if __name__ == "__main__": diff --git a/examples/tensorflow/summarization/run_summarization.py b/examples/tensorflow/summarization/run_summarization.py index 6d4cf99e6782f8..2cf6bdba604b8d 100644 --- a/examples/tensorflow/summarization/run_summarization.py +++ b/examples/tensorflow/summarization/run_summarization.py @@ -18,11 +18,11 @@ """ # You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments. +import json import logging import os import sys from dataclasses import dataclass, field -from functools import partial from typing import Optional import datasets @@ -30,7 +30,6 @@ import numpy as np import tensorflow as tf from datasets import load_dataset -from tqdm import tqdm import evaluate import transformers @@ -38,7 +37,10 @@ from transformers import ( AutoConfig, AutoTokenizer, + DataCollatorForSeq2Seq, HfArgumentParser, + KerasMetricCallback, + PushToHubCallback, TFAutoModelForSeq2SeqLM, TFTrainingArguments, create_optimizer, @@ -253,7 +255,6 @@ def __post_init__(self): # endregion - # region Dataset name mappings summarization_name_mapping = { "amazon_reviews_multi": ("review_body", "review_title"), @@ -272,71 +273,6 @@ def __post_init__(self): # endregion -# region Data generator -def sample_generator(dataset, model, tokenizer, shuffle, pad_to_multiple_of=None): - if shuffle: - sample_ordering = np.random.permutation(len(dataset)) - else: - sample_ordering = np.arange(len(dataset)) - for sample_idx in sample_ordering: - example = dataset[int(sample_idx)] - # Handle dicts with proper padding and conversion to tensor. - example = tokenizer.pad(example, return_tensors="np", pad_to_multiple_of=pad_to_multiple_of) - example = {key: tf.convert_to_tensor(arr, dtype_hint=tf.int32) for key, arr in example.items()} - if model is not None and hasattr(model, "prepare_decoder_input_ids_from_labels"): - decoder_input_ids = model.prepare_decoder_input_ids_from_labels( - labels=tf.expand_dims(example["labels"], 0) - ) - example["decoder_input_ids"] = tf.squeeze(decoder_input_ids, 0) - yield example, example["labels"] # TF needs some kind of labels, even if we don't use them - return - - -# endregion - - -# region Helper functions -def dataset_to_tf(dataset, model, tokenizer, total_batch_size, num_epochs, shuffle): - if dataset is None: - return None - train_generator = partial(sample_generator, dataset, model, tokenizer, shuffle=shuffle) - train_signature = { - feature: tf.TensorSpec(shape=(None,), dtype=tf.int32) - for feature in dataset.features - if feature != "special_tokens_mask" - } - if ( - model is not None - and "decoder_input_ids" not in train_signature - and hasattr(model, "prepare_decoder_input_ids_from_labels") - ): - train_signature["decoder_input_ids"] = train_signature["labels"] - # This may need to be changed depending on your particular model or tokenizer! - padding_values = { - key: tf.convert_to_tensor(tokenizer.pad_token_id if tokenizer.pad_token_id is not None else 0, dtype=tf.int32) - for key in train_signature.keys() - } - padding_values["labels"] = tf.convert_to_tensor(-100, dtype=tf.int32) - train_signature["labels"] = train_signature["input_ids"] - train_signature = (train_signature, train_signature["labels"]) - options = tf.data.Options() - options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF - tf_dataset = ( - tf.data.Dataset.from_generator(train_generator, output_signature=train_signature) - .with_options(options) - .padded_batch( - batch_size=total_batch_size, - drop_remainder=True, - padding_values=(padding_values, np.array(-100, dtype=np.int32)), - ) - .repeat(int(num_epochs)) - ) - return tf_dataset - - -# endregion - - def main(): # region Argument parsing # See all possible arguments in src/transformers/training_args.py @@ -587,59 +523,148 @@ def postprocess_text(preds, labels): if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") + label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id + data_collator = DataCollatorForSeq2Seq( + tokenizer, + model=model, + label_pad_token_id=label_pad_token_id, + pad_to_multiple_of=128, # Reduce the number of unique shapes for XLA, especially for generation + return_tensors="tf", + ) + + dataset_options = tf.data.Options() + dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF + num_replicas = training_args.strategy.num_replicas_in_sync total_train_batch_size = training_args.per_device_train_batch_size * num_replicas total_eval_batch_size = training_args.per_device_eval_batch_size * num_replicas - tf_train_dataset = dataset_to_tf( + + # model.prepare_tf_dataset() wraps a Hugging Face dataset in a tf.data.Dataset which is ready to use in + # training. This is the recommended way to use a Hugging Face dataset when training with Keras. You can also + # use the lower-level dataset.to_tf_dataset() method, but you will have to specify things like column names + # yourself if you use this method, whereas they are automatically inferred from the model input names when + # using model.prepare_tf_dataset() + # For more info see the docs: + # https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset + # https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.to_tf_dataset + + tf_train_dataset = model.prepare_tf_dataset( train_dataset, - model, - tokenizer, - total_batch_size=total_train_batch_size, - num_epochs=training_args.num_train_epochs, + collate_fn=data_collator, + batch_size=total_train_batch_size, shuffle=True, - ) - tf_eval_dataset = dataset_to_tf( + ).with_options(dataset_options) + tf_eval_dataset = model.prepare_tf_dataset( eval_dataset, - model, - tokenizer, - total_eval_batch_size, - num_epochs=1, + collate_fn=data_collator, + batch_size=total_eval_batch_size, shuffle=False, - ) + ).with_options(dataset_options) # endregion # region Optimizer, loss and LR scheduling - # Scheduler and math around the number of training steps. - num_update_steps_per_epoch = len(train_dataset) // total_train_batch_size - num_train_steps = training_args.num_train_epochs * num_update_steps_per_epoch - optimizer, lr_schedule = create_optimizer( - init_lr=training_args.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=0 - ) - - def masked_sparse_categorical_crossentropy(y_true, y_pred): - # We clip the negative labels to 0 to avoid NaNs appearing in the output and - # fouling up everything that comes afterwards. The loss values corresponding to clipped values - # will be masked later anyway, but even masked NaNs seem to cause overflows for some reason. - # 1e6 is chosen as a reasonable upper bound for the number of token indices - in the unlikely - # event that you have more than 1 million tokens in your vocabulary, consider increasing this value. - # More pragmatically, consider redesigning your tokenizer. - losses = tf.keras.losses.sparse_categorical_crossentropy( - tf.clip_by_value(y_true, 0, int(1e6)), y_pred, from_logits=True + num_train_steps = int(len(tf_train_dataset) * training_args.num_train_epochs) + if training_args.warmup_steps > 0: + num_warmup_steps = training_args.warmup_steps + elif training_args.warmup_ratio > 0: + num_warmup_steps = int(num_train_steps * training_args.warmup_ratio) + else: + num_warmup_steps = 0 + if training_args.do_train: + optimizer, lr_schedule = create_optimizer( + init_lr=training_args.learning_rate, + num_train_steps=num_train_steps, + num_warmup_steps=num_warmup_steps, + adam_beta1=training_args.adam_beta1, + adam_beta2=training_args.adam_beta2, + adam_epsilon=training_args.adam_epsilon, + weight_decay_rate=training_args.weight_decay, + adam_global_clipnorm=training_args.max_grad_norm, ) - # Compute the per-sample loss only over the unmasked tokens - losses = tf.ragged.boolean_mask(losses, y_true != -100) - losses = tf.reduce_mean(losses, axis=-1) - return losses + else: + optimizer = None + + # endregion + # region Metric and KerasMetricCallback + if training_args.do_eval: + metric = evaluate.load("rouge") + + if data_args.val_max_target_length is None: + data_args.val_max_target_length = data_args.max_target_length + + gen_kwargs = { + "max_length": data_args.val_max_target_length if data_args is not None else config.max_length, + "num_beams": data_args.num_beams, + "no_repeat_ngram_size": 0, # Not supported under XLA right now, and some models set it by default + } + + def compute_metrics(preds): + predictions, labels = preds + if isinstance(predictions, tuple): + predictions = predictions[0] + decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True) + labels = np.where(labels != -100, labels, tokenizer.pad_token_id) + decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) + decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) + metrics = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True) + # Only print the mid f-measures, but there are a lot of other statistics in there too! + metrics = {key: round(val.mid.fmeasure * 100, 4) for key, val in metrics.items()} + return metrics + + # The KerasMetricCallback allows metrics that are too complex to write as standard Keras metrics + # to be computed each epoch. Any Python code can be included in the metric_fn. This is especially + # useful for metrics like BLEU and ROUGE that perform string comparisons on decoded model outputs. + # For more information, see the docs at + # https://huggingface.co/docs/transformers/main_classes/keras_callbacks#transformers.KerasMetricCallback + + metric_callback = KerasMetricCallback( + metric_fn=compute_metrics, + eval_dataset=tf_eval_dataset, + predict_with_generate=True, + use_xla_generation=True, + generate_kwargs=gen_kwargs, + ) + callbacks = [metric_callback] + else: + callbacks = [] # endregion - # region Metric - metric = evaluate.load("rouge") + # region Preparing push_to_hub and model card + push_to_hub_model_id = training_args.push_to_hub_model_id + model_name = model_args.model_name_or_path.split("/")[-1] + if not push_to_hub_model_id: + if data_args.dataset_name is not None: + push_to_hub_model_id = f"{model_name}-finetuned-{data_args.dataset_name}" + else: + push_to_hub_model_id = f"{model_name}-finetuned-summarization" + + model_card_kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "summarization"} + if data_args.dataset_name is not None: + model_card_kwargs["dataset_tags"] = data_args.dataset_name + if data_args.dataset_config_name is not None: + model_card_kwargs["dataset_args"] = data_args.dataset_config_name + model_card_kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" + else: + model_card_kwargs["dataset"] = data_args.dataset_name + + if training_args.push_to_hub: + # Because this training can be quite long, we save once per epoch. + callbacks.append( + PushToHubCallback( + output_dir=training_args.output_dir, + model_id=push_to_hub_model_id, + organization=training_args.push_to_hub_organization, + token=training_args.push_to_hub_token, + tokenizer=tokenizer, + **model_card_kwargs, + ) + ) # endregion # region Training - model.compile(loss={"logits": masked_sparse_categorical_crossentropy}, optimizer=optimizer) - + model.compile(optimizer=optimizer, jit_compile=training_args.xla) + eval_metrics = None if training_args.do_train: logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") @@ -648,28 +673,29 @@ def masked_sparse_categorical_crossentropy(y_true, y_pred): logger.info(f" Total train batch size = {total_train_batch_size}") logger.info(f" Total optimization steps = {num_train_steps}") - model.fit( - tf_train_dataset, - epochs=int(training_args.num_train_epochs), - steps_per_epoch=num_update_steps_per_epoch, - ) + if training_args.xla and not data_args.pad_to_max_length: + logger.warning( + "XLA training may be slow at first when --pad_to_max_length is not set " + "until all possible shapes have been compiled." + ) + history = model.fit(tf_train_dataset, epochs=int(training_args.num_train_epochs), callbacks=callbacks) + eval_metrics = {key: val[-1] for key, val in history.history.items()} # endregion # region Validation - if data_args.val_max_target_length is None: - data_args.val_max_target_length = data_args.max_target_length - gen_kwargs = { - "max_length": data_args.val_max_target_length if data_args is not None else config.max_length, - "num_beams": data_args.num_beams, - } - if training_args.do_eval: + if training_args.do_eval and not training_args.do_train: + # Do a standalone evaluation run logger.info("Evaluation...") - for batch, labels in tqdm( - tf_eval_dataset, total=len(eval_dataset) // training_args.per_device_eval_batch_size - ): + + # Compiling generation with XLA yields enormous speedups, see https://huggingface.co/blog/tf-xla-generate + @tf.function(jit_compile=True) + def generate(**kwargs): + return model.generate(**kwargs) + + for batch, labels in tf_eval_dataset: batch.update(gen_kwargs) - generated_tokens = model.generate(**batch) + generated_tokens = generate(**batch) if isinstance(generated_tokens, tuple): generated_tokens = generated_tokens[0] decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) @@ -679,13 +705,19 @@ def masked_sparse_categorical_crossentropy(y_true, y_pred): metric.add_batch(predictions=decoded_preds, references=decoded_labels) - result = metric.compute(use_stemmer=True) - result = {k: round(v * 100, 4) for k, v in result.items()} + eval_metrics = metric.compute(use_stemmer=True) + result = {key: round(val.mid.fmeasure * 100, 4) for key, val in eval_metrics.items()} logger.info(result) # endregion - if training_args.output_dir is not None: + if training_args.output_dir is not None and eval_metrics is not None: + output_eval_file = os.path.join(training_args.output_dir, "all_results.json") + with open(output_eval_file, "w") as writer: + writer.write(json.dumps(eval_metrics)) + + if training_args.output_dir is not None and not training_args.push_to_hub: + # If we're not pushing to hub, at least save a local copy when we're done model.save_pretrained(training_args.output_dir) diff --git a/examples/tensorflow/test_tensorflow_examples.py b/examples/tensorflow/test_tensorflow_examples.py new file mode 100644 index 00000000000000..9b692ce80cbdd6 --- /dev/null +++ b/examples/tensorflow/test_tensorflow_examples.py @@ -0,0 +1,295 @@ +# coding=utf-8 +# Copyright 2022 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import argparse +import json +import logging +import os +import sys +from unittest import skip +from unittest.mock import patch + +import tensorflow as tf + +from transformers.testing_utils import TestCasePlus, get_gpu_count, slow + + +SRC_DIRS = [ + os.path.join(os.path.dirname(__file__), dirname) + for dirname in [ + "text-generation", + "text-classification", + "token-classification", + "language-modeling", + "multiple-choice", + "question-answering", + "summarization", + "translation", + ] +] +sys.path.extend(SRC_DIRS) + + +if SRC_DIRS is not None: + import run_clm + import run_mlm + import run_ner + import run_qa as run_squad + import run_summarization + import run_swag + import run_text_classification + import run_translation + + +logging.basicConfig(level=logging.DEBUG) + +logger = logging.getLogger() + + +def get_setup_file(): + parser = argparse.ArgumentParser() + parser.add_argument("-f") + args = parser.parse_args() + return args.f + + +def get_results(output_dir): + results = {} + path = os.path.join(output_dir, "all_results.json") + if os.path.exists(path): + with open(path, "r") as f: + results = json.load(f) + else: + raise ValueError(f"can't find {path}") + return results + + +def is_cuda_available(): + return bool(tf.config.list_physical_devices("GPU")) + + +stream_handler = logging.StreamHandler(sys.stdout) +logger.addHandler(stream_handler) + + +class ExamplesTests(TestCasePlus): + @skip("Skipping until shape inference for to_tf_dataset PR is merged.") + def test_run_text_classification(self): + tmp_dir = self.get_auto_remove_tmp_dir() + testargs = f""" + run_text_classification.py + --model_name_or_path distilbert-base-uncased + --output_dir {tmp_dir} + --overwrite_output_dir + --train_file ./tests/fixtures/tests_samples/MRPC/train.csv + --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv + --do_train + --do_eval + --per_device_train_batch_size=2 + --per_device_eval_batch_size=1 + --learning_rate=1e-4 + --max_steps=10 + --warmup_steps=2 + --seed=42 + --max_seq_length=128 + """.split() + + if is_cuda_available(): + testargs.append("--fp16") + + with patch.object(sys, "argv", testargs): + run_text_classification.main() + # Reset the mixed precision policy so we don't break other tests + tf.keras.mixed_precision.set_global_policy("float32") + result = get_results(tmp_dir) + self.assertGreaterEqual(result["eval_accuracy"], 0.75) + + def test_run_clm(self): + tmp_dir = self.get_auto_remove_tmp_dir() + testargs = f""" + run_clm.py + --model_name_or_path distilgpt2 + --train_file ./tests/fixtures/sample_text.txt + --validation_file ./tests/fixtures/sample_text.txt + --do_train + --do_eval + --block_size 128 + --per_device_train_batch_size 2 + --per_device_eval_batch_size 1 + --num_train_epochs 2 + --output_dir {tmp_dir} + --overwrite_output_dir + """.split() + + if len(tf.config.list_physical_devices("GPU")) > 1: + # Skipping because there are not enough batches to train the model + would need a drop_last to work. + return + + with patch.object(sys, "argv", testargs): + run_clm.main() + result = get_results(tmp_dir) + self.assertLess(result["eval_perplexity"], 100) + + def test_run_mlm(self): + tmp_dir = self.get_auto_remove_tmp_dir() + testargs = f""" + run_mlm.py + --model_name_or_path distilroberta-base + --train_file ./tests/fixtures/sample_text.txt + --validation_file ./tests/fixtures/sample_text.txt + --max_seq_length 64 + --output_dir {tmp_dir} + --overwrite_output_dir + --do_train + --do_eval + --prediction_loss_only + --num_train_epochs=1 + """.split() + + with patch.object(sys, "argv", testargs): + run_mlm.main() + result = get_results(tmp_dir) + self.assertLess(result["eval_perplexity"], 42) + + def test_run_ner(self): + # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu + epochs = 7 if get_gpu_count() > 1 else 2 + + tmp_dir = self.get_auto_remove_tmp_dir() + testargs = f""" + run_ner.py + --model_name_or_path bert-base-uncased + --train_file tests/fixtures/tests_samples/conll/sample.json + --validation_file tests/fixtures/tests_samples/conll/sample.json + --output_dir {tmp_dir} + --overwrite_output_dir + --do_train + --do_eval + --warmup_steps=2 + --learning_rate=2e-4 + --per_device_train_batch_size=2 + --per_device_eval_batch_size=2 + --num_train_epochs={epochs} + --seed 7 + """.split() + + with patch.object(sys, "argv", testargs): + run_ner.main() + result = get_results(tmp_dir) + self.assertGreaterEqual(result["accuracy"], 0.75) + + def test_run_squad(self): + tmp_dir = self.get_auto_remove_tmp_dir() + testargs = f""" + run_qa.py + --model_name_or_path bert-base-uncased + --version_2_with_negative + --train_file tests/fixtures/tests_samples/SQUAD/sample.json + --validation_file tests/fixtures/tests_samples/SQUAD/sample.json + --output_dir {tmp_dir} + --overwrite_output_dir + --max_steps=10 + --warmup_steps=2 + --do_train + --do_eval + --learning_rate=2e-4 + --per_device_train_batch_size=2 + --per_device_eval_batch_size=1 + """.split() + + with patch.object(sys, "argv", testargs): + run_squad.main() + result = get_results(tmp_dir) + self.assertGreaterEqual(result["f1"], 30) + self.assertGreaterEqual(result["exact"], 30) + + def test_run_swag(self): + tmp_dir = self.get_auto_remove_tmp_dir() + testargs = f""" + run_swag.py + --model_name_or_path bert-base-uncased + --train_file tests/fixtures/tests_samples/swag/sample.json + --validation_file tests/fixtures/tests_samples/swag/sample.json + --output_dir {tmp_dir} + --overwrite_output_dir + --max_steps=20 + --warmup_steps=2 + --do_train + --do_eval + --learning_rate=2e-4 + --per_device_train_batch_size=2 + --per_device_eval_batch_size=1 + """.split() + + with patch.object(sys, "argv", testargs): + run_swag.main() + result = get_results(tmp_dir) + self.assertGreaterEqual(result["val_accuracy"], 0.8) + + @slow + def test_run_summarization(self): + tmp_dir = self.get_auto_remove_tmp_dir() + testargs = f""" + run_summarization.py + --model_name_or_path t5-small + --train_file tests/fixtures/tests_samples/xsum/sample.json + --validation_file tests/fixtures/tests_samples/xsum/sample.json + --output_dir {tmp_dir} + --overwrite_output_dir + --max_steps=50 + --warmup_steps=8 + --do_train + --do_eval + --learning_rate=2e-4 + --per_device_train_batch_size=2 + --per_device_eval_batch_size=1 + """.split() + + with patch.object(sys, "argv", testargs): + run_summarization.main() + result = get_results(tmp_dir) + self.assertGreaterEqual(result["rouge1"], 10) + self.assertGreaterEqual(result["rouge2"], 2) + self.assertGreaterEqual(result["rougeL"], 7) + self.assertGreaterEqual(result["rougeLsum"], 7) + + @slow + def test_run_translation(self): + tmp_dir = self.get_auto_remove_tmp_dir() + testargs = f""" + run_translation.py + --model_name_or_path Rocketknight1/student_marian_en_ro_6_1 + --source_lang en + --target_lang ro + --train_file tests/fixtures/tests_samples/wmt16/sample.json + --validation_file tests/fixtures/tests_samples/wmt16/sample.json + --output_dir {tmp_dir} + --overwrite_output_dir + --warmup_steps=8 + --do_train + --do_eval + --learning_rate=3e-3 + --num_train_epochs 12 + --per_device_train_batch_size=2 + --per_device_eval_batch_size=1 + --source_lang en_XX + --target_lang ro_RO + """.split() + + with patch.object(sys, "argv", testargs): + run_translation.main() + result = get_results(tmp_dir) + self.assertGreaterEqual(result["bleu"], 30) diff --git a/examples/tensorflow/text-classification/run_glue.py b/examples/tensorflow/text-classification/run_glue.py index 9fb0b3f8e43482..d5a6b096b3467e 100644 --- a/examples/tensorflow/text-classification/run_glue.py +++ b/examples/tensorflow/text-classification/run_glue.py @@ -16,6 +16,7 @@ """ Finetuning the library models for sequence classification on GLUE.""" # You can also adapt this script on your own text classification task. Pointers for this are left as comments. +import json import logging import os import sys @@ -35,32 +36,16 @@ DefaultDataCollator, HfArgumentParser, PretrainedConfig, + PushToHubCallback, TFAutoModelForSequenceClassification, TFTrainingArguments, + create_optimizer, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process from transformers.utils import check_min_version, send_example_telemetry -# region Helper functions - - -class SavePretrainedCallback(tf.keras.callbacks.Callback): - # Hugging Face models have a save_pretrained() method that saves both the weights and the necessary - # metadata to allow them to be loaded as a pretrained model in future. This is a simple Keras callback - # that saves the model with this method after each epoch. - def __init__(self, output_dir, **kwargs): - super().__init__() - self.output_dir = output_dir - - def on_epoch_end(self, epoch, logs=None): - self.model.save_pretrained(self.output_dir) - - -# endregion - - # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.22.0.dev0") @@ -312,7 +297,6 @@ def main(): # region Dataset preprocessing sentence1_key, sentence2_key = task_to_keys[data_args.task_name] - non_label_column_names = [name for name in datasets["train"].column_names if name != "label"] # Padding strategy if data_args.pad_to_max_length: @@ -394,24 +378,11 @@ def compute_metrics(preds, label_ids): ) # endregion - # region Optimizer, loss and compilation - optimizer = tf.keras.optimizers.Adam( - learning_rate=training_args.learning_rate, - beta_1=training_args.adam_beta1, - beta_2=training_args.adam_beta2, - epsilon=training_args.adam_epsilon, - clipnorm=training_args.max_grad_norm, - ) - if is_regression: - loss_fn = tf.keras.losses.MeanSquaredError() - metrics = [] - else: - loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) - metrics = ["accuracy"] - model.compile(optimizer=optimizer, loss=loss_fn, metrics=metrics) - # endregion - # region Convert data to a tf.data.Dataset + dataset_options = tf.data.Options() + dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF + num_replicas = training_args.strategy.num_replicas_in_sync + tf_data = dict() max_samples = { "train": data_args.max_train_samples, @@ -428,31 +399,89 @@ def compute_metrics(preds, label_ids): assert "label" in datasets[key].features, f"Missing labels from {key} data!" if key == "train": shuffle = True - batch_size = training_args.per_device_train_batch_size - drop_remainder = True # Saves us worrying about scaling gradients for the last batch + batch_size = training_args.per_device_train_batch_size * num_replicas else: shuffle = False - batch_size = training_args.per_device_eval_batch_size - drop_remainder = False + batch_size = training_args.per_device_eval_batch_size * num_replicas samples_limit = max_samples[key] dataset = datasets[key] if samples_limit is not None: dataset = dataset.select(range(samples_limit)) - data = dataset.to_tf_dataset( - columns=[col for col in dataset.column_names if col not in set(non_label_column_names + ["label"])], + + # model.prepare_tf_dataset() wraps a Hugging Face dataset in a tf.data.Dataset which is ready to use in + # training. This is the recommended way to use a Hugging Face dataset when training with Keras. You can also + # use the lower-level dataset.to_tf_dataset() method, but you will have to specify things like column names + # yourself if you use this method, whereas they are automatically inferred from the model input names when + # using model.prepare_tf_dataset() + # For more info see the docs: + # https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset + # https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.to_tf_dataset + data = model.prepare_tf_dataset( + dataset, shuffle=shuffle, batch_size=batch_size, collate_fn=data_collator, - drop_remainder=drop_remainder, - # `label_cols` is needed for user-defined losses, such as in this example - label_cols="label" if "label" in dataset.column_names else None, + tokenizer=tokenizer, ) + data = data.with_options(dataset_options) tf_data[key] = data # endregion + # region Optimizer, loss and compilation + if training_args.do_train: + num_train_steps = len(tf_data["train"]) * training_args.num_train_epochs + if training_args.warmup_steps > 0: + num_warmup_steps = training_args.warmup_steps + elif training_args.warmup_ratio > 0: + num_warmup_steps = int(num_train_steps * training_args.warmup_ratio) + else: + num_warmup_steps = 0 + + optimizer, schedule = create_optimizer( + init_lr=training_args.learning_rate, + num_train_steps=num_train_steps, + num_warmup_steps=num_warmup_steps, + adam_beta1=training_args.adam_beta1, + adam_beta2=training_args.adam_beta2, + adam_epsilon=training_args.adam_epsilon, + weight_decay_rate=training_args.weight_decay, + adam_global_clipnorm=training_args.max_grad_norm, + ) + else: + optimizer = "adam" # Just write anything because we won't be using it + if is_regression: + metrics = [] + else: + metrics = ["accuracy"] + model.compile(optimizer=optimizer, metrics=metrics, jit_compile=training_args.xla) + # endregion + + # region Preparing push_to_hub and model card + push_to_hub_model_id = training_args.push_to_hub_model_id + model_name = model_args.model_name_or_path.split("/")[-1] + if not push_to_hub_model_id: + push_to_hub_model_id = f"{model_name}-finetuned-glue" + + model_card_kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"} + model_card_kwargs["task_name"] = data_args.task_name + + if training_args.push_to_hub: + callbacks = [ + PushToHubCallback( + output_dir=training_args.output_dir, + model_id=push_to_hub_model_id, + organization=training_args.push_to_hub_organization, + token=training_args.push_to_hub_token, + tokenizer=tokenizer, + **model_card_kwargs, + ) + ] + else: + callbacks = [] + # endregion + # region Training and validation if training_args.do_train: - callbacks = [SavePretrainedCallback(output_dir=training_args.output_dir)] if training_args.do_eval and not data_args.task_name == "mnli": # Do both evaluation and training in the Keras fit loop, unless the task is MNLI # because MNLI has two validation sets @@ -472,6 +501,12 @@ def compute_metrics(preds, label_ids): # We normally do validation as part of the Keras fit loop, but we run it independently # if there was no fit() step (because we didn't train the model) or if the task is MNLI, # because MNLI has a separate validation-mismatched validation set + + # In this example, we compute advanced metrics only at the end of training, and only compute + # loss and accuracy on the validation set each epoch, but + # if you'd like to compute metrics every epoch that are too complex to be written as + # standard Keras metrics, you can use our KerasMetricCallback. See + # https://huggingface.co/docs/transformers/main/en/main_classes/keras_callbacks logger.info("*** Evaluate ***") # Loop to handle MNLI double evaluation (matched, mis-matched) @@ -489,6 +524,10 @@ def compute_metrics(preds, label_ids): eval_metrics = compute_metrics(eval_predictions, raw_dataset["label"]) print(f"Evaluation metrics ({task}):") print(eval_metrics) + if training_args.output_dir is not None: + output_eval_file = os.path.join(training_args.output_dir, "all_results.json") + with open(output_eval_file, "w") as writer: + writer.write(json.dumps(eval_metrics)) # endregion @@ -538,6 +577,10 @@ def compute_metrics(preds, label_ids): writer.write(f"{index}\t{item}\n") # endregion + if training_args.output_dir is not None and not training_args.push_to_hub: + # If we're not pushing to hub, at least save a local copy when we're done + model.save_pretrained(training_args.output_dir) + if __name__ == "__main__": main() diff --git a/examples/tensorflow/text-classification/run_text_classification.py b/examples/tensorflow/text-classification/run_text_classification.py index b5d19032971c5b..0cf1972e937fb8 100644 --- a/examples/tensorflow/text-classification/run_text_classification.py +++ b/examples/tensorflow/text-classification/run_text_classification.py @@ -16,6 +16,7 @@ """ Fine-tuning the library models for sequence classification.""" # You can also adapt this script on your own text classification task. Pointers for this are left as comments. +import json import logging import os import sys @@ -29,12 +30,12 @@ from transformers import ( AutoConfig, AutoTokenizer, - DataCollatorWithPadding, - DefaultDataCollator, HfArgumentParser, PretrainedConfig, + PushToHubCallback, TFAutoModelForSequenceClassification, TFTrainingArguments, + create_optimizer, set_seed, ) from transformers.utils import CONFIG_NAME, TF2_WEIGHTS_NAME, send_example_telemetry @@ -383,10 +384,6 @@ def preprocess_function(examples): datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache) - if data_args.pad_to_max_length: - data_collator = DefaultDataCollator(return_tensors="tf") - else: - data_collator = DataCollatorWithPadding(tokenizer, return_tensors="tf") # endregion with training_args.strategy.scope(): @@ -409,24 +406,10 @@ def preprocess_function(examples): ) # endregion - # region Optimizer, loss and compilation - optimizer = tf.keras.optimizers.Adam( - learning_rate=training_args.learning_rate, - beta_1=training_args.adam_beta1, - beta_2=training_args.adam_beta2, - epsilon=training_args.adam_epsilon, - clipnorm=training_args.max_grad_norm, - ) - if is_regression: - loss_fn = tf.keras.losses.MeanSquaredError() - metrics = [] - else: - loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) - metrics = ["accuracy"] - model.compile(optimizer=optimizer, loss=loss_fn, metrics=metrics) - # endregion - # region Convert data to a tf.data.Dataset + dataset_options = tf.data.Options() + dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF + num_replicas = training_args.strategy.num_replicas_in_sync tf_data = dict() max_samples = { @@ -438,50 +421,121 @@ def preprocess_function(examples): if key not in datasets: tf_data[key] = None continue + if ( + (key == "train" and not training_args.do_train) + or (key == "validation" and not training_args.do_eval) + or (key == "test" and not training_args.do_predict) + ): + tf_data[key] = None + continue if key in ("train", "validation"): assert "label" in datasets[key].features, f"Missing labels from {key} data!" if key == "train": shuffle = True - batch_size = training_args.per_device_train_batch_size - drop_remainder = True # Saves us worrying about scaling gradients for the last batch + batch_size = training_args.per_device_train_batch_size * num_replicas else: shuffle = False - batch_size = training_args.per_device_eval_batch_size - drop_remainder = False + batch_size = training_args.per_device_eval_batch_size * num_replicas samples_limit = max_samples[key] dataset = datasets[key] if samples_limit is not None: dataset = dataset.select(range(samples_limit)) - data = dataset.to_tf_dataset( - columns=[col for col in dataset.column_names if col not in set(non_label_column_names + ["label"])], + + # model.prepare_tf_dataset() wraps a Hugging Face dataset in a tf.data.Dataset which is ready to use in + # training. This is the recommended way to use a Hugging Face dataset when training with Keras. You can also + # use the lower-level dataset.to_tf_dataset() method, but you will have to specify things like column names + # yourself if you use this method, whereas they are automatically inferred from the model input names when + # using model.prepare_tf_dataset() + # For more info see the docs: + # https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset + # https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.to_tf_dataset + + data = model.prepare_tf_dataset( + dataset, shuffle=shuffle, batch_size=batch_size, - collate_fn=data_collator, - drop_remainder=drop_remainder, - # `label_cols` is needed for user-defined losses, such as in this example - label_cols="label" if "label" in dataset.column_names else None, + tokenizer=tokenizer, ) + data = data.with_options(dataset_options) tf_data[key] = data # endregion + # region Optimizer, loss and compilation + + if training_args.do_train: + num_train_steps = len(tf_data["train"]) * training_args.num_train_epochs + if training_args.warmup_steps > 0: + num_warmup_steps = training_args.warmup_steps + elif training_args.warmup_ratio > 0: + num_warmup_steps = int(num_train_steps * training_args.warmup_ratio) + else: + num_warmup_steps = 0 + + optimizer, schedule = create_optimizer( + init_lr=training_args.learning_rate, + num_train_steps=num_train_steps, + num_warmup_steps=num_warmup_steps, + adam_beta1=training_args.adam_beta1, + adam_beta2=training_args.adam_beta2, + adam_epsilon=training_args.adam_epsilon, + weight_decay_rate=training_args.weight_decay, + adam_global_clipnorm=training_args.max_grad_norm, + ) + else: + optimizer = None + if is_regression: + metrics = [] + else: + metrics = ["accuracy"] + model.compile(optimizer=optimizer, metrics=metrics) + # endregion + + # region Preparing push_to_hub and model card + push_to_hub_model_id = training_args.push_to_hub_model_id + model_name = model_args.model_name_or_path.split("/")[-1] + if not push_to_hub_model_id: + push_to_hub_model_id = f"{model_name}-finetuned-text-classification" + + model_card_kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"} + + if training_args.push_to_hub: + callbacks = [ + PushToHubCallback( + output_dir=training_args.output_dir, + model_id=push_to_hub_model_id, + organization=training_args.push_to_hub_organization, + token=training_args.push_to_hub_token, + tokenizer=tokenizer, + **model_card_kwargs, + ) + ] + else: + callbacks = [] + # endregion + # region Training and validation if tf_data["train"] is not None: - callbacks = [SavePretrainedCallback(output_dir=training_args.output_dir)] model.fit( tf_data["train"], validation_data=tf_data["validation"], epochs=int(training_args.num_train_epochs), callbacks=callbacks, ) - elif tf_data["validation"] is not None: - # If there's a validation dataset but no training set, just evaluate the metrics + if tf_data["validation"] is not None: logger.info("Computing metrics on validation data...") if is_regression: loss = model.evaluate(tf_data["validation"]) - logger.info(f"Loss: {loss:.5f}") + logger.info(f"Eval loss: {loss:.5f}") else: loss, accuracy = model.evaluate(tf_data["validation"]) - logger.info(f"Loss: {loss:.5f}, Accuracy: {accuracy * 100:.4f}%") + logger.info(f"Eval loss: {loss:.5f}, Eval accuracy: {accuracy * 100:.4f}%") + if training_args.output_dir is not None: + output_eval_file = os.path.join(training_args.output_dir, "all_results.json") + eval_dict = {"eval_loss": loss} + if not is_regression: + eval_dict["eval_accuracy"] = accuracy + with open(output_eval_file, "w") as writer: + writer.write(json.dumps(eval_dict)) # endregion # region Prediction @@ -501,14 +555,9 @@ def preprocess_function(examples): logger.info(f"Wrote predictions to {output_test_file}!") # endregion - # region Prediction losses - # This section is outside the scope() because it's very quick to compute, but behaves badly inside it - if "test" in datasets and "label" in datasets["test"].features: - print("Computing prediction loss on test labels...") - labels = datasets["test"]["label"] - loss = float(loss_fn(labels, predictions).numpy()) - print(f"Test loss: {loss:.4f}") - # endregion + if training_args.output_dir is not None and not training_args.push_to_hub: + # If we're not pushing to hub, at least save a local copy when we're done + model.save_pretrained(training_args.output_dir) if __name__ == "__main__": diff --git a/examples/tensorflow/token-classification/run_ner.py b/examples/tensorflow/token-classification/run_ner.py index caa47e115a4bfa..8eb9aef92b7bd6 100644 --- a/examples/tensorflow/token-classification/run_ner.py +++ b/examples/tensorflow/token-classification/run_ner.py @@ -18,14 +18,14 @@ without using a Trainer. """ +import json import logging +import os import random from dataclasses import dataclass, field -from functools import partial from typing import Optional import datasets -import numpy as np import tensorflow as tf from datasets import ClassLabel, load_dataset @@ -33,10 +33,11 @@ import transformers from transformers import ( CONFIG_MAPPING, - MODEL_MAPPING, AutoConfig, AutoTokenizer, + DataCollatorForTokenClassification, HfArgumentParser, + PushToHubCallback, TFAutoModelForTokenClassification, TFTrainingArguments, create_optimizer, @@ -48,11 +49,7 @@ logger = logging.getLogger(__name__) logger.addHandler(logging.StreamHandler()) -require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt") - -# You should update this to your particular problem to have better documentation of `model_type` -MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys()) -MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) +require_version("datasets>=1.8.0", "To fix: pip install -r examples/tensorflow/token-classification/requirements.txt") # region Command-line arguments @@ -195,61 +192,6 @@ def __post_init__(self): # endregion -# region Data generator -def sample_generator(dataset, tokenizer, shuffle, pad_to_multiple_of=None): - # Trim off the last partial batch if present - if shuffle: - sample_ordering = np.random.permutation(len(dataset)) - else: - sample_ordering = np.arange(len(dataset)) - for sample_idx in sample_ordering: - example = dataset[int(sample_idx)] - # Handle dicts with proper padding and conversion to tensor. - example = tokenizer.pad(example, return_tensors="np", pad_to_multiple_of=pad_to_multiple_of) - if tokenizer.pad_token_id is not None: - example["labels"][example["attention_mask"] == 0] = -100 - example = {key: tf.convert_to_tensor(arr) for key, arr in example.items()} - - yield example, example["labels"] # TF needs some kind of labels, even if we don't use them - return - - -# endregion - - -# region Helper functions -def dataset_to_tf(dataset, tokenizer, total_batch_size, num_epochs, shuffle): - train_generator = partial(sample_generator, dataset, tokenizer, shuffle=shuffle) - train_signature = { - feature: tf.TensorSpec(shape=(None,), dtype=tf.int64) - for feature in dataset.features - if feature != "special_tokens_mask" - } - # This may need to be changed depending on your particular model or tokenizer! - padding_values = {key: tf.convert_to_tensor(0, dtype=tf.int64) for key in dataset.features} - padding_values["labels"] = tf.convert_to_tensor(-100, dtype=tf.int64) - if tokenizer.pad_token_id is not None: - padding_values["input_ids"] = tf.convert_to_tensor(tokenizer.pad_token_id, dtype=tf.int64) - train_signature["labels"] = train_signature["input_ids"] - train_signature = (train_signature, train_signature["labels"]) - options = tf.data.Options() - options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF - tf_dataset = ( - tf.data.Dataset.from_generator(train_generator, output_signature=train_signature) - .with_options(options) - .padded_batch( - batch_size=total_batch_size, - drop_remainder=True, - padding_values=(padding_values, np.array(0, dtype=np.int64)), - ) - .repeat(int(num_epochs)) - ) - return tf_dataset - - -# endregion - - def main(): # region Argument Parsing parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments)) @@ -419,6 +361,14 @@ def tokenize_and_align_labels(examples): train_dataset = processed_raw_datasets["train"] eval_dataset = processed_raw_datasets["validation"] + if data_args.max_train_samples is not None: + max_train_samples = min(len(train_dataset), data_args.max_train_samples) + train_dataset = train_dataset.select(range(max_train_samples)) + + if data_args.max_eval_samples is not None: + max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) + eval_dataset = eval_dataset.select(range(max_eval_samples)) + # Log a few random samples from the training set: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") @@ -439,43 +389,62 @@ def tokenize_and_align_labels(examples): # endregion # region Create TF datasets + + # We need the DataCollatorForTokenClassification here, as we need to correctly pad labels as + # well as inputs. + collate_fn = DataCollatorForTokenClassification(tokenizer=tokenizer, return_tensors="tf") num_replicas = training_args.strategy.num_replicas_in_sync total_train_batch_size = training_args.per_device_train_batch_size * num_replicas - train_batches_per_epoch = len(train_dataset) // total_train_batch_size - tf_train_dataset = dataset_to_tf( + + dataset_options = tf.data.Options() + dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF + + # model.prepare_tf_dataset() wraps a Hugging Face dataset in a tf.data.Dataset which is ready to use in + # training. This is the recommended way to use a Hugging Face dataset when training with Keras. You can also + # use the lower-level dataset.to_tf_dataset() method, but you will have to specify things like column names + # yourself if you use this method, whereas they are automatically inferred from the model input names when + # using model.prepare_tf_dataset() + # For more info see the docs: + # https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset + # https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.to_tf_dataset + + tf_train_dataset = model.prepare_tf_dataset( train_dataset, - tokenizer, - total_batch_size=total_train_batch_size, - num_epochs=training_args.num_train_epochs, + collate_fn=collate_fn, + batch_size=total_train_batch_size, shuffle=True, - ) + ).with_options(dataset_options) total_eval_batch_size = training_args.per_device_eval_batch_size * num_replicas - eval_batches_per_epoch = len(eval_dataset) // total_eval_batch_size - tf_eval_dataset = dataset_to_tf( + tf_eval_dataset = model.prepare_tf_dataset( eval_dataset, - tokenizer, - total_batch_size=total_eval_batch_size, - num_epochs=training_args.num_train_epochs, + collate_fn=collate_fn, + batch_size=total_eval_batch_size, shuffle=False, - ) + ).with_options(dataset_options) # endregion # region Optimizer, loss and compilation + num_train_steps = int(len(tf_train_dataset) * training_args.num_train_epochs) + if training_args.warmup_steps > 0: + num_warmup_steps = training_args.warmup_steps + elif training_args.warmup_ratio > 0: + num_warmup_steps = int(num_train_steps * training_args.warmup_ratio) + else: + num_warmup_steps = 0 + optimizer, lr_schedule = create_optimizer( init_lr=training_args.learning_rate, - num_train_steps=int(training_args.num_train_epochs * train_batches_per_epoch), - num_warmup_steps=training_args.warmup_steps, + num_train_steps=num_train_steps, + num_warmup_steps=num_warmup_steps, adam_beta1=training_args.adam_beta1, adam_beta2=training_args.adam_beta2, adam_epsilon=training_args.adam_epsilon, weight_decay_rate=training_args.weight_decay, + adam_global_clipnorm=training_args.max_grad_norm, ) - def dummy_loss(y_true, y_pred): - return tf.reduce_mean(y_pred) - - model.compile(loss={"loss": dummy_loss}, optimizer=optimizer) + model.compile(optimizer=optimizer, jit_compile=training_args.xla) # endregion # Metrics @@ -517,6 +486,39 @@ def compute_metrics(): # endregion + # region Preparing push_to_hub and model card + push_to_hub_model_id = training_args.push_to_hub_model_id + model_name = model_args.model_name_or_path.split("/")[-1] + if not push_to_hub_model_id: + if data_args.dataset_name is not None: + push_to_hub_model_id = f"{model_name}-finetuned-{data_args.dataset_name}" + else: + push_to_hub_model_id = f"{model_name}-finetuned-token-classification" + + model_card_kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "token-classification"} + if data_args.dataset_name is not None: + model_card_kwargs["dataset_tags"] = data_args.dataset_name + if data_args.dataset_config_name is not None: + model_card_kwargs["dataset_args"] = data_args.dataset_config_name + model_card_kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" + else: + model_card_kwargs["dataset"] = data_args.dataset_name + + if training_args.push_to_hub: + callbacks = [ + PushToHubCallback( + output_dir=training_args.output_dir, + model_id=push_to_hub_model_id, + organization=training_args.push_to_hub_organization, + token=training_args.push_to_hub_token, + tokenizer=tokenizer, + **model_card_kwargs, + ) + ] + else: + callbacks = [] + # endregion + # region Training logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") @@ -524,23 +526,43 @@ def compute_metrics(): logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}") logger.info(f" Total train batch size = {total_train_batch_size}") # Only show the progress bar once on each machine. + model.fit( tf_train_dataset, validation_data=tf_eval_dataset, epochs=int(training_args.num_train_epochs), - steps_per_epoch=train_batches_per_epoch, - validation_steps=eval_batches_per_epoch, + callbacks=callbacks, ) # endregion # region Predictions - # For predictions, we preload the entire validation set - note that if you have a really giant validation - # set, you might need to change this! - eval_inputs = {key: tf.ragged.constant(eval_dataset[key]).to_tensor() for key in eval_dataset.features} - predictions = model.predict(eval_inputs, batch_size=training_args.per_device_eval_batch_size)["logits"] - predictions = tf.math.argmax(predictions, axis=-1) - labels = np.array(eval_inputs["labels"]) - labels[np.array(eval_inputs["attention_mask"]) == 0] = -100 + # If you have variable batch sizes (i.e. not using pad_to_max_length), then + # this bit might fail on TF < 2.8 because TF can't concatenate outputs of varying seq + # length from predict(). + + try: + predictions = model.predict(tf_eval_dataset, batch_size=training_args.per_device_eval_batch_size)["logits"] + except tf.python.framework.errors_impl.InvalidArgumentError: + raise ValueError( + "Concatenating predictions failed! If your version of TensorFlow is 2.8.0 or older " + "then you will need to use --pad_to_max_length to generate predictions, as older " + "versions of TensorFlow cannot concatenate variable-length predictions as RaggedTensor." + ) + if isinstance(predictions, tf.RaggedTensor): + predictions = predictions.to_tensor(default_value=-100) + predictions = tf.math.argmax(predictions, axis=-1).numpy() + if "label" in eval_dataset: + labels = eval_dataset.with_format("tf")["label"] + else: + labels = eval_dataset.with_format("tf")["labels"] + if isinstance(labels, tf.RaggedTensor): + labels = labels.to_tensor(default_value=-100) + labels = labels.numpy() + attention_mask = eval_dataset.with_format("tf")["attention_mask"] + if isinstance(attention_mask, tf.RaggedTensor): + attention_mask = attention_mask.to_tensor(default_value=-100) + attention_mask = attention_mask.numpy() + labels[attention_mask == 0] = -100 preds, refs = get_labels(predictions, labels) metric.add_batch( predictions=preds, @@ -550,12 +572,15 @@ def compute_metrics(): logger.info("Evaluation metrics:") for key, val in eval_metric.items(): logger.info(f"{key}: {val:.4f}") - # endregion - # We don't do predictions in the strategy scope because there are some issues in there right now. - # They'll get fixed eventually, promise! + if training_args.output_dir is not None: + output_eval_file = os.path.join(training_args.output_dir, "all_results.json") + with open(output_eval_file, "w") as writer: + writer.write(json.dumps(eval_metric)) + # endregion - if training_args.output_dir is not None: + if training_args.output_dir is not None and not training_args.push_to_hub: + # If we're not pushing to hub, at least save a local copy when we're done model.save_pretrained(training_args.output_dir) diff --git a/examples/tensorflow/translation/run_translation.py b/examples/tensorflow/translation/run_translation.py index 7f5eb9eb9defb7..7ccd089ca82dce 100644 --- a/examples/tensorflow/translation/run_translation.py +++ b/examples/tensorflow/translation/run_translation.py @@ -18,30 +18,32 @@ """ # You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments. +import json import logging import os import sys from dataclasses import dataclass, field -from functools import partial from typing import Optional import datasets import numpy as np import tensorflow as tf from datasets import load_dataset -from tqdm import tqdm import evaluate import transformers from transformers import ( AutoConfig, AutoTokenizer, + DataCollatorForSeq2Seq, HfArgumentParser, + KerasMetricCallback, M2M100Tokenizer, MBart50Tokenizer, MBart50TokenizerFast, MBartTokenizer, MBartTokenizerFast, + PushToHubCallback, TFAutoModelForSeq2SeqLM, TFTrainingArguments, create_optimizer, @@ -224,6 +226,16 @@ class DataTrainingArguments: source_prefix: Optional[str] = field( default=None, metadata={"help": "A prefix to add before every source text (useful for T5 models)."} ) + forced_bos_token: Optional[str] = field( + default=None, + metadata={ + "help": ( + "The token to force as the first generated token after the :obj:`decoder_start_token_id`.Useful for" + " multilingual models like :doc:`mBART <../model_doc/mbart>` where the first generated token needs to" + " be the target language token.(Usually it is the target language token)" + ) + }, + ) def __post_init__(self): if self.dataset_name is None and self.train_file is None and self.validation_file is None: @@ -239,70 +251,6 @@ def __post_init__(self): self.val_max_target_length = self.max_target_length -# endregion - -# region Data generator -def sample_generator(dataset, model, tokenizer, shuffle, pad_to_multiple_of=None): - if shuffle: - sample_ordering = np.random.permutation(len(dataset)) - else: - sample_ordering = np.arange(len(dataset)) - for sample_idx in sample_ordering: - example = dataset[int(sample_idx)] - # Handle dicts with proper padding and conversion to tensor. - example = tokenizer.pad(example, return_tensors="np", pad_to_multiple_of=pad_to_multiple_of) - example = {key: tf.convert_to_tensor(arr, dtype_hint=tf.int32) for key, arr in example.items()} - if model is not None and hasattr(model, "prepare_decoder_input_ids_from_labels"): - decoder_input_ids = model.prepare_decoder_input_ids_from_labels( - labels=tf.expand_dims(example["labels"], 0) - ) - example["decoder_input_ids"] = tf.squeeze(decoder_input_ids, 0) - yield example, example["labels"] # TF needs some kind of labels, even if we don't use them - return - - -# endregion - - -# region Helper functions -def dataset_to_tf(dataset, model, tokenizer, total_batch_size, num_epochs, shuffle): - if dataset is None: - return None - train_generator = partial(sample_generator, dataset, model, tokenizer, shuffle=shuffle) - train_signature = { - feature: tf.TensorSpec(shape=(None,), dtype=tf.int32) - for feature in dataset.features - if feature != "special_tokens_mask" - } - if ( - model is not None - and "decoder_input_ids" not in train_signature - and hasattr(model, "prepare_decoder_input_ids_from_labels") - ): - train_signature["decoder_input_ids"] = train_signature["labels"] - # This may need to be changed depending on your particular model or tokenizer! - padding_values = { - key: tf.convert_to_tensor(tokenizer.pad_token_id if tokenizer.pad_token_id is not None else 0, dtype=tf.int32) - for key in train_signature.keys() - } - padding_values["labels"] = tf.convert_to_tensor(-100, dtype=tf.int32) - train_signature["labels"] = train_signature["input_ids"] - train_signature = (train_signature, train_signature["labels"]) - options = tf.data.Options() - options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF - tf_dataset = ( - tf.data.Dataset.from_generator(train_generator, output_signature=train_signature) - .with_options(options) - .padded_batch( - batch_size=total_batch_size, - drop_remainder=True, - padding_values=(padding_values, np.array(-100, dtype=np.int32)), - ) - .repeat(int(num_epochs)) - ) - return tf_dataset - - # endregion @@ -541,67 +489,149 @@ def preprocess_function(examples): # endregion # region Prepare TF Dataset objects + label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id + data_collator = DataCollatorForSeq2Seq( + tokenizer, + model=model, + label_pad_token_id=label_pad_token_id, + pad_to_multiple_of=64, # Reduce the number of unique shapes for XLA, especially for generation + return_tensors="tf", + ) num_replicas = training_args.strategy.num_replicas_in_sync total_train_batch_size = training_args.per_device_train_batch_size * num_replicas total_eval_batch_size = training_args.per_device_eval_batch_size * num_replicas - tf_train_dataset = dataset_to_tf( + + dataset_options = tf.data.Options() + dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF + + # model.prepare_tf_dataset() wraps a Hugging Face dataset in a tf.data.Dataset which is ready to use in + # training. This is the recommended way to use a Hugging Face dataset when training with Keras. You can also + # use the lower-level dataset.to_tf_dataset() method, but you will have to specify things like column names + # yourself if you use this method, whereas they are automatically inferred from the model input names when + # using model.prepare_tf_dataset() + # For more info see the docs: + # https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset + # https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.to_tf_dataset + + tf_train_dataset = model.prepare_tf_dataset( train_dataset, - model, - tokenizer, - total_batch_size=total_train_batch_size, - num_epochs=training_args.num_train_epochs, + collate_fn=data_collator, + batch_size=total_train_batch_size, shuffle=True, - ) - tf_eval_dataset = dataset_to_tf( - eval_dataset, - model, - tokenizer, - total_eval_batch_size, - num_epochs=1, - shuffle=False, - ) + ).with_options(dataset_options) + tf_eval_dataset = model.prepare_tf_dataset( + eval_dataset, collate_fn=data_collator, batch_size=total_eval_batch_size, shuffle=False + ).with_options(dataset_options) # endregion - # region Optimizer, loss and LR scheduling - # Scheduler and math around the number of training steps. - num_update_steps_per_epoch = len(train_dataset) // training_args.per_device_train_batch_size - num_train_steps = training_args.num_train_epochs * num_update_steps_per_epoch - optimizer, lr_schedule = create_optimizer( - init_lr=training_args.learning_rate, - num_train_steps=num_train_steps, - num_warmup_steps=training_args.warmup_steps, - ) - - def masked_sparse_categorical_crossentropy(y_true, y_pred): - # We clip the negative labels to 0 to avoid NaNs appearing in the output and - # fouling up everything that comes afterwards. The loss values corresponding to clipped values - # will be masked later anyway, but even masked NaNs seem to cause overflows for some reason. - # 1e6 is chosen as a reasonable upper bound for the number of token indices - in the unlikely - # event that you have more than 1 million tokens in your vocabulary, consider increasing this value. - # More pragmatically, consider redesigning your tokenizer. - losses = tf.keras.losses.sparse_categorical_crossentropy( - tf.clip_by_value(y_true, 0, int(1e6)), y_pred, from_logits=True + # region Optimizer and LR scheduling + num_train_steps = int(len(tf_train_dataset) * training_args.num_train_epochs) + if training_args.warmup_steps > 0: + num_warmup_steps = training_args.warmup_steps + elif training_args.warmup_ratio > 0: + num_warmup_steps = int(num_train_steps * training_args.warmup_ratio) + else: + num_warmup_steps = 0 + if training_args.do_train: + optimizer, lr_schedule = create_optimizer( + init_lr=training_args.learning_rate, + num_train_steps=num_train_steps, + num_warmup_steps=num_warmup_steps, + adam_beta1=training_args.adam_beta1, + adam_beta2=training_args.adam_beta2, + adam_epsilon=training_args.adam_epsilon, + weight_decay_rate=training_args.weight_decay, + adam_global_clipnorm=training_args.max_grad_norm, ) - # Compute the per-sample loss only over the unmasked tokens - losses = tf.ragged.boolean_mask(losses, y_true != -100) - losses = tf.reduce_mean(losses, axis=-1) - return losses - + else: + optimizer = None # endregion # region Metric and postprocessing - metric = evaluate.load("sacrebleu") + if training_args.do_eval: + metric = evaluate.load("sacrebleu") - def postprocess_text(preds, labels): - preds = [pred.strip() for pred in preds] - labels = [[label.strip()] for label in labels] + if data_args.val_max_target_length is None: + data_args.val_max_target_length = data_args.max_target_length + + gen_kwargs = { + "max_length": data_args.val_max_target_length, + "num_beams": data_args.num_beams, + "no_repeat_ngram_size": 0, # Not supported under XLA right now, and some models set it by default + } + + def postprocess_text(preds, labels): + preds = [pred.strip() for pred in preds] + labels = [[label.strip()] for label in labels] + + return preds, labels + + def compute_metrics(preds): + predictions, labels = preds + if isinstance(predictions, tuple): + predictions = predictions[0] + decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True) + labels = np.where(labels != -100, labels, tokenizer.pad_token_id) + decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) + decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) + metrics = metric.compute(predictions=decoded_preds, references=decoded_labels) + return {"bleu": metrics["score"]} + + # The KerasMetricCallback allows metrics that are too complex to write as standard Keras metrics + # to be computed each epoch. Any Python code can be included in the metric_fn. This is especially + # useful for metrics like BLEU and ROUGE that perform string comparisons on decoded model outputs. + # For more information, see the docs at + # https://huggingface.co/docs/transformers/main_classes/keras_callbacks#transformers.KerasMetricCallback + + metric_callback = KerasMetricCallback( + metric_fn=compute_metrics, + eval_dataset=tf_eval_dataset, + predict_with_generate=True, + use_xla_generation=True, + generate_kwargs=gen_kwargs, + ) + callbacks = [metric_callback] + else: + callbacks = [] - return preds, labels + # endregion + # region Preparing push_to_hub and model card + push_to_hub_model_id = training_args.push_to_hub_model_id + model_name = model_args.model_name_or_path.split("/")[-1] + if not push_to_hub_model_id: + push_to_hub_model_id = f"{model_name}-finetuned-{data_args.source_lang}-{data_args.target_lang}" + + model_card_kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "translation"} + if data_args.dataset_name is not None: + model_card_kwargs["dataset_tags"] = data_args.dataset_name + if data_args.dataset_config_name is not None: + model_card_kwargs["dataset_args"] = data_args.dataset_config_name + model_card_kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" + else: + model_card_kwargs["dataset"] = data_args.dataset_name + + languages = [l for l in [data_args.source_lang, data_args.target_lang] if l is not None] + if len(languages) > 0: + model_card_kwargs["language"] = languages + + if training_args.push_to_hub: + # Because this training can be quite long, we save once per epoch. + callbacks.append( + PushToHubCallback( + output_dir=training_args.output_dir, + model_id=push_to_hub_model_id, + organization=training_args.push_to_hub_organization, + token=training_args.push_to_hub_token, + tokenizer=tokenizer, + **model_card_kwargs, + ) + ) # endregion # region Training - model.compile(loss={"logits": masked_sparse_categorical_crossentropy}, optimizer=optimizer) + eval_metrics = None + model.compile(optimizer=optimizer, jit_compile=training_args.xla) if training_args.do_train: logger.info("***** Running training *****") @@ -611,41 +641,48 @@ def postprocess_text(preds, labels): logger.info(f" Total train batch size = {total_train_batch_size}") logger.info(f" Total optimization steps = {num_train_steps}") - model.fit( - tf_train_dataset, - epochs=int(training_args.num_train_epochs), - steps_per_epoch=num_update_steps_per_epoch, - ) + if training_args.xla and not data_args.pad_to_max_length: + logger.warning( + "XLA training may be slow at first when --pad_to_max_length is not set " + "until all possible shapes have been compiled." + ) + + history = model.fit(tf_train_dataset, epochs=int(training_args.num_train_epochs), callbacks=callbacks) + eval_metrics = {key: val[-1] for key, val in history.history.items()} # endregion # region Validation - if data_args.val_max_target_length is None: - data_args.val_max_target_length = data_args.max_target_length - - gen_kwargs = { - "max_length": data_args.val_max_target_length, - "num_beams": data_args.num_beams, - } - if training_args.do_eval: - logger.info("Evaluation...") - for batch, labels in tqdm( - tf_eval_dataset, total=len(eval_dataset) // training_args.per_device_eval_batch_size - ): - batch.update(gen_kwargs) - generated_tokens = model.generate(**batch) - if isinstance(generated_tokens, tuple): - generated_tokens = generated_tokens[0] - decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) - labels = np.where(labels != -100, labels, tokenizer.pad_token_id) - decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) - decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) - - metric.add_batch(predictions=decoded_preds, references=decoded_labels) - eval_metric = metric.compute() - logger.info({"bleu": eval_metric["score"]}) + if training_args.do_eval and not training_args.do_train: + # Compiling generation with XLA yields enormous speedups, see https://huggingface.co/blog/tf-xla-generate + @tf.function(jit_compile=True) + def generate(**kwargs): + return model.generate(**kwargs) + + if training_args.do_eval: + logger.info("Evaluation...") + for batch, labels in tf_eval_dataset: + batch.update(gen_kwargs) + generated_tokens = generate(**batch) + if isinstance(generated_tokens, tuple): + generated_tokens = generated_tokens[0] + decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) + labels = np.where(labels != -100, labels, tokenizer.pad_token_id) + decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) + decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) + + metric.add_batch(predictions=decoded_preds, references=decoded_labels) + + eval_metrics = metric.compute() + logger.info({"bleu": eval_metrics["score"]}) # endregion - if training_args.output_dir is not None: + if training_args.output_dir is not None and eval_metrics is not None: + output_eval_file = os.path.join(training_args.output_dir, "all_results.json") + with open(output_eval_file, "w") as writer: + writer.write(json.dumps(eval_metrics)) + + if training_args.output_dir is not None and not training_args.push_to_hub: + # If we're not pushing to hub, at least save a local copy when we're done model.save_pretrained(training_args.output_dir) diff --git a/src/transformers/optimization_tf.py b/src/transformers/optimization_tf.py index 345b2eaf1f3aa8..e2b2a961ca1984 100644 --- a/src/transformers/optimization_tf.py +++ b/src/transformers/optimization_tf.py @@ -87,6 +87,8 @@ def create_optimizer( adam_beta1: float = 0.9, adam_beta2: float = 0.999, adam_epsilon: float = 1e-8, + adam_clipnorm: Optional[float] = None, + adam_global_clipnorm: Optional[float] = None, weight_decay_rate: float = 0.0, power: float = 1.0, include_in_weight_decay: Optional[List[str]] = None, @@ -109,6 +111,11 @@ def create_optimizer( The beta2 to use in Adam. adam_epsilon (`float`, *optional*, defaults to 1e-8): The epsilon to use in Adam. + adam_clipnorm: (`float`, *optional*, defaults to `None`): + If not `None`, clip the gradient norm for each weight tensor to this value. + adam_global_clipnorm: (`float`, *optional*, defaults to `None`) + If not `None`, clip gradient norm to this value. When using this argument, the norm is computed over all + weight tensors, as if they were concatenated into a single vector. weight_decay_rate (`float`, *optional*, defaults to 0): The weight decay to use. power (`float`, *optional*, defaults to 1.0): @@ -137,12 +144,19 @@ def create_optimizer( beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, + clipnorm=adam_clipnorm, + global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], include_in_weight_decay=include_in_weight_decay, ) else: optimizer = tf.keras.optimizers.Adam( - learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon + learning_rate=lr_schedule, + beta_1=adam_beta1, + beta_2=adam_beta2, + epsilon=adam_epsilon, + clipnorm=adam_clipnorm, + global_clipnorm=adam_global_clipnorm, ) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index e662d6fca4fdaa..e9a9f8f0043a79 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -106,6 +106,7 @@ class OptimizerNames(ExplicitEnum): @dataclass class TrainingArguments: + framework = "pt" """ TrainingArguments is the subset of the arguments we use in our example scripts **which relate to the training loop itself**. @@ -1039,25 +1040,25 @@ def __post_init__(self): self.greater_is_better = self.metric_for_best_model not in ["loss", "eval_loss"] if self.run_name is None: self.run_name = self.output_dir + if self.framework == "pt" and is_torch_available(): + if self.fp16_backend and self.fp16_backend != "auto": + warnings.warn( + "`fp16_backend` is deprecated and will be removed in version 5 of 🤗 Transformers. Use" + " `half_precision_backend` instead", + FutureWarning, + ) + self.half_precision_backend = self.fp16_backend - if self.fp16_backend and self.fp16_backend != "auto": - warnings.warn( - "`fp16_backend` is deprecated and will be removed in version 5 of 🤗 Transformers. Use" - " `half_precision_backend` instead", - FutureWarning, - ) - self.half_precision_backend = self.fp16_backend - - if self.bf16 or self.bf16_full_eval: + if self.bf16 or self.bf16_full_eval: - if self.no_cuda and not is_torch_bf16_cpu_available(): - # cpu - raise ValueError("Your setup doesn't support bf16/cpu. You need torch>=1.10") - elif not self.no_cuda and not is_torch_bf16_gpu_available(): - # gpu - raise ValueError( - "Your setup doesn't support bf16/gpu. You need torch>=1.10, using Ampere GPU with cuda>=11.0" - ) + if self.no_cuda and not is_torch_bf16_cpu_available(): + # cpu + raise ValueError("Your setup doesn't support bf16/cpu. You need torch>=1.10") + elif not self.no_cuda and not is_torch_bf16_gpu_available(): + # gpu + raise ValueError( + "Your setup doesn't support bf16/gpu. You need torch>=1.10, using Ampere GPU with cuda>=11.0" + ) if self.fp16 and self.bf16: raise ValueError("At most one of fp16 and bf16 can be True, but not both") @@ -1084,7 +1085,8 @@ def __post_init__(self): self.optim = OptimizerNames.ADAFACTOR if ( - is_torch_available() + self.framework == "pt" + and is_torch_available() and (self.device.type != "cuda") and not (self.device.type == "xla" and "GPU_NUM_DEVICES" in os.environ) and (self.fp16 or self.fp16_full_eval) @@ -1095,7 +1097,8 @@ def __post_init__(self): ) if ( - is_torch_available() + self.framework == "pt" + and is_torch_available() and (self.device.type != "cuda") and not (self.device.type == "xla" and "GPU_NUM_DEVICES" in os.environ) and (self.device.type != "cpu") @@ -1106,7 +1109,7 @@ def __post_init__(self): " (`--bf16_full_eval`) can only be used on CUDA or CPU devices." ) - if is_torch_available() and self.tf32 is not None: + if self.framework == "pt" and is_torch_available() and self.tf32 is not None: if self.tf32: if is_torch_tf32_available(): torch.backends.cuda.matmul.allow_tf32 = True diff --git a/src/transformers/training_args_tf.py b/src/transformers/training_args_tf.py index 060b78e9220518..fdae51f72d4b56 100644 --- a/src/transformers/training_args_tf.py +++ b/src/transformers/training_args_tf.py @@ -28,6 +28,7 @@ @dataclass class TFTrainingArguments(TrainingArguments): + framework = "tf" """ TrainingArguments is the subset of the arguments we use in our example scripts **which relate to the training loop itself**. @@ -188,9 +189,6 @@ class TFTrainingArguments(TrainingArguments): def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", int]: logger.info("Tensorflow: setting up strategy") - if self.xla: - tf.config.optimizer.set_jit(True) - gpus = tf.config.list_physical_devices("GPU") # Set to float16 at first From 0d0aada56444ad554021947addaa035feb55948f Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Wed, 10 Aug 2022 11:55:18 -0400 Subject: [PATCH 065/539] Use commit hash to look in cache instead of calling head (#18534) * Use commit hash to look in cache instead of calling head * Add tests * Add attr for local configs too * Stupid typos * Fix tests * Update src/transformers/utils/hub.py Co-authored-by: Julien Chaumond * Address Julien's comments Co-authored-by: Julien Chaumond --- src/transformers/configuration_utils.py | 23 +++++++- src/transformers/modeling_flax_utils.py | 7 +++ src/transformers/modeling_tf_utils.py | 7 +++ src/transformers/modeling_utils.py | 6 ++ .../models/auto/tokenization_auto.py | 15 ++++- src/transformers/pipelines/__init__.py | 11 +++- src/transformers/testing_utils.py | 28 +++++++++ src/transformers/tokenization_utils_base.py | 16 +++++- src/transformers/utils/__init__.py | 1 + src/transformers/utils/hub.py | 57 +++++++++++++++---- tests/models/auto/test_modeling_auto.py | 19 +++++++ tests/models/auto/test_modeling_tf_auto.py | 19 +++++++ tests/models/auto/test_tokenization_auto.py | 12 ++++ tests/pipelines/test_pipelines_common.py | 11 ++++ tests/test_configuration_common.py | 12 ++-- 15 files changed, 221 insertions(+), 23 deletions(-) diff --git a/src/transformers/configuration_utils.py b/src/transformers/configuration_utils.py index b924cec9ae021c..41503255ac2adb 100755 --- a/src/transformers/configuration_utils.py +++ b/src/transformers/configuration_utils.py @@ -27,7 +27,15 @@ from . import __version__ from .dynamic_module_utils import custom_object_save -from .utils import CONFIG_NAME, PushToHubMixin, cached_file, copy_func, is_torch_available, logging +from .utils import ( + CONFIG_NAME, + PushToHubMixin, + cached_file, + copy_func, + extract_commit_hash, + is_torch_available, + logging, +) logger = logging.get_logger(__name__) @@ -343,6 +351,8 @@ def __init__(self, **kwargs): # Name or path to the pretrained checkpoint self._name_or_path = str(kwargs.pop("name_or_path", "")) + # Config hash + self._commit_hash = kwargs.pop("_commit_hash", None) # Drop the transformers version info self.transformers_version = kwargs.pop("transformers_version", None) @@ -539,6 +549,8 @@ def get_config_dict( original_kwargs = copy.deepcopy(kwargs) # Get config dict associated with the base config file config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs) + if "_commit_hash" in config_dict: + original_kwargs["_commit_hash"] = config_dict["_commit_hash"] # That config file may point us toward another config file to use. if "configuration_files" in config_dict: @@ -564,6 +576,7 @@ def _get_config_dict( subfolder = kwargs.pop("subfolder", "") from_pipeline = kwargs.pop("_from_pipeline", None) from_auto_class = kwargs.pop("_from_auto", False) + commit_hash = kwargs.pop("_commit_hash", None) if trust_remote_code is True: logger.warning( @@ -599,7 +612,9 @@ def _get_config_dict( user_agent=user_agent, revision=revision, subfolder=subfolder, + _commit_hash=commit_hash, ) + commit_hash = extract_commit_hash(resolved_config_file, commit_hash) except EnvironmentError: # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted to # the original exception. @@ -616,6 +631,7 @@ def _get_config_dict( try: # Load config dict config_dict = cls._dict_from_json_file(resolved_config_file) + config_dict["_commit_hash"] = commit_hash except (json.JSONDecodeError, UnicodeDecodeError): raise EnvironmentError( f"It looks like the config file at '{resolved_config_file}' is not a valid JSON file." @@ -648,6 +664,9 @@ def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "PretrainedConfig": # We remove them so they don't appear in `return_unused_kwargs`. kwargs.pop("_from_auto", None) kwargs.pop("_from_pipeline", None) + # The commit hash might have been updated in the `config_dict`, we don't want the kwargs to erase that update. + if "_commit_hash" in kwargs and "_commit_hash" in config_dict: + kwargs["_commit_hash"] = config_dict["_commit_hash"] config = cls(**config_dict) @@ -751,6 +770,8 @@ def to_dict(self) -> Dict[str, Any]: output["model_type"] = self.__class__.model_type if "_auto_class" in output: del output["_auto_class"] + if "_commit_hash" in output: + del output["_commit_hash"] # Transformers version when serializing the model output["transformers_version"] = __version__ diff --git a/src/transformers/modeling_flax_utils.py b/src/transformers/modeling_flax_utils.py index af75b418cad23e..683e25631c0f44 100644 --- a/src/transformers/modeling_flax_utils.py +++ b/src/transformers/modeling_flax_utils.py @@ -595,6 +595,7 @@ def from_pretrained( from_auto_class = kwargs.pop("_from_auto", False) _do_init = kwargs.pop("_do_init", True) subfolder = kwargs.pop("subfolder", "") + commit_hash = kwargs.pop("_commit_hash", None) if trust_remote_code is True: logger.warning( @@ -625,11 +626,15 @@ def from_pretrained( revision=revision, _from_auto=from_auto_class, _from_pipeline=from_pipeline, + _commit_hash=commit_hash, **kwargs, ) else: model_kwargs = kwargs + if commit_hash is None: + commit_hash = getattr(config, "_commit_hash", None) + # Add the dtype to model_kwargs model_kwargs["dtype"] = dtype @@ -682,6 +687,7 @@ def from_pretrained( revision=revision, subfolder=subfolder, _raise_exceptions_for_missing_entries=False, + _commit_hash=commit_hash, ) resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs) @@ -748,6 +754,7 @@ def from_pretrained( use_auth_token=use_auth_token, user_agent=user_agent, revision=revision, + _commit_hash=commit_hash, ) # init random models diff --git a/src/transformers/modeling_tf_utils.py b/src/transformers/modeling_tf_utils.py index 68ee4117a2f9db..3587354b9326a9 100644 --- a/src/transformers/modeling_tf_utils.py +++ b/src/transformers/modeling_tf_utils.py @@ -2161,6 +2161,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): from_pipeline = kwargs.pop("_from_pipeline", None) from_auto_class = kwargs.pop("_from_auto", False) subfolder = kwargs.pop("subfolder", "") + commit_hash = kwargs.pop("_commit_hash", None) if trust_remote_code is True: logger.warning( @@ -2191,11 +2192,15 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): revision=revision, _from_auto=from_auto_class, _from_pipeline=from_pipeline, + _commit_hash=commit_hash, **kwargs, ) else: model_kwargs = kwargs + if commit_hash is None: + commit_hash = getattr(config, "_commit_hash", None) + # This variable will flag if we're loading a sharded checkpoint. In this case the archive file is just the # index of the files. is_sharded = False @@ -2253,6 +2258,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): revision=revision, subfolder=subfolder, _raise_exceptions_for_missing_entries=False, + _commit_hash=commit_hash, ) resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs) @@ -2320,6 +2326,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): use_auth_token=use_auth_token, user_agent=user_agent, revision=revision, + _commit_hash=commit_hash, ) config.name_or_path = pretrained_model_name_or_path diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index 1d895baecfedac..d77258c94ea089 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -1840,6 +1840,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P load_in_8bit = kwargs.pop("load_in_8bit", False) int8_threshold = kwargs.pop("int8_threshold", 6.0) subfolder = kwargs.pop("subfolder", "") + commit_hash = kwargs.pop("_commit_hash", None) if trust_remote_code is True: logger.warning( @@ -1918,6 +1919,9 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P else: model_kwargs = kwargs + if commit_hash is None: + commit_hash = getattr(config, "_commit_hash", None) + # This variable will flag if we're loading a sharded checkpoint. In this case the archive file is just the # index of the files. is_sharded = False @@ -2004,6 +2008,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P revision=revision, subfolder=subfolder, _raise_exceptions_for_missing_entries=False, + _commit_hash=commit_hash, ) resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs) @@ -2078,6 +2083,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P user_agent=user_agent, revision=revision, subfolder=subfolder, + _commit_hash=commit_hash, ) # load pt weights early so that we know which dtype to init the model under diff --git a/src/transformers/models/auto/tokenization_auto.py b/src/transformers/models/auto/tokenization_auto.py index d8759fd4e7842e..8ece13b79fe3fa 100644 --- a/src/transformers/models/auto/tokenization_auto.py +++ b/src/transformers/models/auto/tokenization_auto.py @@ -25,7 +25,7 @@ from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import TOKENIZER_CONFIG_FILE from ...tokenization_utils_fast import PreTrainedTokenizerFast -from ...utils import get_file_from_repo, is_sentencepiece_available, is_tokenizers_available, logging +from ...utils import cached_file, extract_commit_hash, is_sentencepiece_available, is_tokenizers_available, logging from ..encoder_decoder import EncoderDecoderConfig from .auto_factory import _LazyAutoMapping from .configuration_auto import ( @@ -389,7 +389,8 @@ def get_tokenizer_config( tokenizer.save_pretrained("tokenizer-test") tokenizer_config = get_tokenizer_config("tokenizer-test") ```""" - resolved_config_file = get_file_from_repo( + commit_hash = kwargs.get("_commit_hash", None) + resolved_config_file = cached_file( pretrained_model_name_or_path, TOKENIZER_CONFIG_FILE, cache_dir=cache_dir, @@ -399,13 +400,19 @@ def get_tokenizer_config( use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, + _raise_exceptions_for_missing_entries=False, + _raise_exceptions_for_connection_errors=False, + _commit_hash=commit_hash, ) if resolved_config_file is None: logger.info("Could not locate the tokenizer configuration file, will try to use the model config instead.") return {} + commit_hash = extract_commit_hash(resolved_config_file, commit_hash) with open(resolved_config_file, encoding="utf-8") as reader: - return json.load(reader) + result = json.load(reader) + result["_commit_hash"] = commit_hash + return result class AutoTokenizer: @@ -532,6 +539,8 @@ def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs): # Next, let's try to use the tokenizer_config file to get the tokenizer class. tokenizer_config = get_tokenizer_config(pretrained_model_name_or_path, **kwargs) + if "_commit_hash" in tokenizer_config: + kwargs["_commit_hash"] = tokenizer_config["_commit_hash"] config_tokenizer_class = tokenizer_config.get("tokenizer_class") tokenizer_auto_map = None if "auto_map" in tokenizer_config: diff --git a/src/transformers/pipelines/__init__.py b/src/transformers/pipelines/__init__.py index dfa75768d8f811..5752790aa9614b 100755 --- a/src/transformers/pipelines/__init__.py +++ b/src/transformers/pipelines/__init__.py @@ -557,7 +557,12 @@ def pipeline( # Make sure we only pass use_auth_token once as a kwarg (it used to be possible to pass it in model_kwargs, # this is to keep BC). use_auth_token = model_kwargs.pop("use_auth_token", use_auth_token) - hub_kwargs = {"revision": revision, "use_auth_token": use_auth_token, "trust_remote_code": trust_remote_code} + hub_kwargs = { + "revision": revision, + "use_auth_token": use_auth_token, + "trust_remote_code": trust_remote_code, + "_commit_hash": None, + } if task is None and model is None: raise RuntimeError( @@ -583,8 +588,10 @@ def pipeline( # Instantiate config if needed if isinstance(config, str): config = AutoConfig.from_pretrained(config, _from_pipeline=task, **hub_kwargs, **model_kwargs) + hub_kwargs["_commit_hash"] = config._commit_hash elif config is None and isinstance(model, str): config = AutoConfig.from_pretrained(model, _from_pipeline=task, **hub_kwargs, **model_kwargs) + hub_kwargs["_commit_hash"] = config._commit_hash custom_tasks = {} if config is not None and len(getattr(config, "custom_pipelines", {})) > 0: @@ -639,6 +646,7 @@ def pipeline( ) if config is None and isinstance(model, str): config = AutoConfig.from_pretrained(model, _from_pipeline=task, **hub_kwargs, **model_kwargs) + hub_kwargs["_commit_hash"] = config._commit_hash if device_map is not None: if "device_map" in model_kwargs: @@ -672,6 +680,7 @@ def pipeline( ) model_config = model.config + hub_kwargs["_commit_hash"] = model.config._commit_hash load_tokenizer = type(model_config) in TOKENIZER_MAPPING or model_config.tokenizer_class is not None load_feature_extractor = type(model_config) in FEATURE_EXTRACTOR_MAPPING or feature_extractor is not None diff --git a/src/transformers/testing_utils.py b/src/transformers/testing_utils.py index 80f7bf9c863c87..d21f353a60a8f5 100644 --- a/src/transformers/testing_utils.py +++ b/src/transformers/testing_utils.py @@ -31,6 +31,7 @@ from typing import Iterator, List, Union from unittest import mock +import huggingface_hub from transformers import logging as transformers_logging from .deepspeed import is_deepspeed_available @@ -1588,3 +1589,30 @@ def run_command(command: List[str], return_stdout=False): raise SubprocessCallException( f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}" ) from e + + +class RequestCounter: + """ + Helper class that will count all requests made online. + """ + + def __enter__(self): + self.head_request_count = 0 + self.get_request_count = 0 + self.other_request_count = 0 + self.old_request = huggingface_hub.file_download.requests.request + huggingface_hub.file_download.requests.request = self.new_request + return self + + def __exit__(self, *args, **kwargs): + huggingface_hub.file_download.requests.request = self.old_request + + def new_request(self, method, **kwargs): + if method == "GET": + self.get_request_count += 1 + elif method == "HEAD": + self.head_request_count += 1 + else: + self.other_request_count += 1 + + return self.old_request(method=method, **kwargs) diff --git a/src/transformers/tokenization_utils_base.py b/src/transformers/tokenization_utils_base.py index f85dc73cb659cb..566fd3fbf92b05 100644 --- a/src/transformers/tokenization_utils_base.py +++ b/src/transformers/tokenization_utils_base.py @@ -42,7 +42,7 @@ add_end_docstrings, cached_file, copy_func, - get_file_from_repo, + extract_commit_hash, is_flax_available, is_offline_mode, is_tf_available, @@ -1651,6 +1651,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], subfolder = kwargs.pop("subfolder", None) from_pipeline = kwargs.pop("_from_pipeline", None) from_auto_class = kwargs.pop("_from_auto", False) + commit_hash = kwargs.pop("_commit_hash", None) user_agent = {"file_type": "tokenizer", "from_auto_class": from_auto_class, "is_fast": "Fast" in cls.__name__} if from_pipeline is not None: @@ -1690,7 +1691,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], if "tokenizer_file" in vocab_files: # Try to get the tokenizer config to see if there are versioned tokenizer files. fast_tokenizer_file = FULL_TOKENIZER_FILE - resolved_config_file = get_file_from_repo( + resolved_config_file = cached_file( pretrained_model_name_or_path, TOKENIZER_CONFIG_FILE, cache_dir=cache_dir, @@ -1701,7 +1702,12 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], revision=revision, local_files_only=local_files_only, subfolder=subfolder, + user_agent=user_agent, + _raise_exceptions_for_missing_entries=False, + _raise_exceptions_for_connection_errors=False, + _commit_hash=commit_hash, ) + commit_hash = extract_commit_hash(resolved_config_file, commit_hash) if resolved_config_file is not None: with open(resolved_config_file, encoding="utf-8") as reader: tokenizer_config = json.load(reader) @@ -1730,7 +1736,9 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], subfolder=subfolder, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False, + _commit_hash=commit_hash, ) + commit_hash = extract_commit_hash(resolved_vocab_files[file_id], commit_hash) if len(unresolved_files) > 0: logger.info( @@ -1763,6 +1771,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], use_auth_token=use_auth_token, cache_dir=cache_dir, local_files_only=local_files_only, + _commit_hash=commit_hash, **kwargs, ) @@ -1776,6 +1785,7 @@ def _from_pretrained( use_auth_token=None, cache_dir=None, local_files_only=False, + _commit_hash=None, **kwargs ): # We instantiate fast tokenizers based on a slow tokenizer if we don't have access to the tokenizer.json @@ -1791,6 +1801,7 @@ def _from_pretrained( use_auth_token=use_auth_token, cache_dir=cache_dir, local_files_only=local_files_only, + _commit_hash=_commit_hash, **(copy.deepcopy(kwargs)), ) else: @@ -1823,6 +1834,7 @@ def _from_pretrained( use_auth_token=use_auth_token, cache_dir=cache_dir, local_files_only=local_files_only, + _commit_hash=_commit_hash, ) config_tokenizer_class = config.tokenizer_class except (OSError, ValueError, KeyError): diff --git a/src/transformers/utils/__init__.py b/src/transformers/utils/__init__.py index 27276aa4946d5e..2a2a4c41257492 100644 --- a/src/transformers/utils/__init__.py +++ b/src/transformers/utils/__init__.py @@ -63,6 +63,7 @@ cached_file, default_cache_path, define_sagemaker_information, + extract_commit_hash, get_cached_models, get_file_from_repo, get_full_repo_name, diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py index 07164e735db901..00f9c277c41773 100644 --- a/src/transformers/utils/hub.py +++ b/src/transformers/utils/hub.py @@ -38,6 +38,7 @@ whoami, ) from huggingface_hub.constants import HUGGINGFACE_HEADER_X_LINKED_ETAG, HUGGINGFACE_HEADER_X_REPO_COMMIT +from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError from requests.exceptions import HTTPError from transformers.utils.logging import tqdm @@ -200,11 +201,27 @@ def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str: return ua -def try_to_load_from_cache(cache_dir, repo_id, filename, revision=None): +def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str]): + """ + Extracts the commit hash from a resolved filename toward a cache file. + """ + if resolved_file is None or commit_hash is not None: + return commit_hash + + search = re.search(r"snapshots/([^/]+)/", resolved_file) + if search is None: + return None + commit_hash = search.groups()[0] + return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None + + +def try_to_load_from_cache(cache_dir, repo_id, filename, revision=None, commit_hash=None): """ Explores the cache to return the latest cached file for a given revision. """ - if revision is None: + if commit_hash is not None and revision is not None: + raise ValueError("`commit_hash` and `revision` are mutually exclusive, pick one only.") + if revision is None and commit_hash is None: revision = "main" model_id = repo_id.replace("/", "--") @@ -216,18 +233,19 @@ def try_to_load_from_cache(cache_dir, repo_id, filename, revision=None): if not os.path.isdir(os.path.join(model_cache, subfolder)): return None - # Resolve refs (for instance to convert main to the associated commit sha) - cached_refs = os.listdir(os.path.join(model_cache, "refs")) - if revision in cached_refs: - with open(os.path.join(model_cache, "refs", revision)) as f: - revision = f.read() + if commit_hash is None: + # Resolve refs (for instance to convert main to the associated commit sha) + cached_refs = os.listdir(os.path.join(model_cache, "refs")) + if revision in cached_refs: + with open(os.path.join(model_cache, "refs", revision)) as f: + commit_hash = f.read() cached_shas = os.listdir(os.path.join(model_cache, "snapshots")) - if revision not in cached_shas: + if commit_hash not in cached_shas: # No cache for this revision and we won't try to return a random revision return None - cached_file = os.path.join(model_cache, "snapshots", revision, filename) + cached_file = os.path.join(model_cache, "snapshots", commit_hash, filename) return cached_file if os.path.isfile(cached_file) else None @@ -265,8 +283,9 @@ def cached_file( local_files_only: bool = False, subfolder: str = "", user_agent: Optional[Union[str, Dict[str, str]]] = None, - _raise_exceptions_for_missing_entries=True, - _raise_exceptions_for_connection_errors=True, + _raise_exceptions_for_missing_entries: bool = True, + _raise_exceptions_for_connection_errors: bool = True, + _commit_hash: Optional[str] = None, ): """ Tries to locate a file in a local folder and repo, downloads and cache it if necessary. @@ -318,6 +337,13 @@ def cached_file( # Download a model weight from the Hub and cache it. model_weights_file = cached_file("bert-base-uncased", "pytorch_model.bin") ```""" + # Private arguments + # _raise_exceptions_for_missing_entries: if False, do not raise an exception for missing entries but return + # None. + # _raise_exceptions_for_connection_errors: if False, do not raise an exception for connection errors but return + # None. + # _commit_hash: passed when we are chaining several calls to various files (e.g. when loading a tokenizer or + # a pipeline). If files are cached for this commit hash, avoid calls to head and get from the cache. if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True @@ -339,6 +365,13 @@ def cached_file( cache_dir = TRANSFORMERS_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) + + if _commit_hash is not None: + # If the file is cached under that commit hash, we return it directly. + resolved_file = try_to_load_from_cache(cache_dir, path_or_repo_id, full_filename, commit_hash=_commit_hash) + if resolved_file is not None: + return resolved_file + user_agent = http_user_agent(user_agent) try: # Load from URL or cache if already cached @@ -803,6 +836,7 @@ def get_checkpoint_shard_files( user_agent=None, revision=None, subfolder="", + _commit_hash=None, ): """ For a given model: @@ -848,6 +882,7 @@ def get_checkpoint_shard_files( user_agent=user_agent, revision=revision, subfolder=subfolder, + _commit_hash=_commit_hash, ) # We have already dealt with RepositoryNotFoundError and RevisionNotFoundError when getting the index, so # we don't have to catch them here. diff --git a/tests/models/auto/test_modeling_auto.py b/tests/models/auto/test_modeling_auto.py index 3731d70f5bb5af..2e1e51a81daac6 100644 --- a/tests/models/auto/test_modeling_auto.py +++ b/tests/models/auto/test_modeling_auto.py @@ -24,6 +24,7 @@ from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, + RequestCounter, require_scatter, require_torch, slow, @@ -354,3 +355,21 @@ def test_model_from_tf_suggestion(self): def test_model_from_flax_suggestion(self): with self.assertRaisesRegex(EnvironmentError, "Use `from_flax=True` to load this model"): _ = AutoModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") + + def test_cached_model_has_minimum_calls_to_head(self): + # Make sure we have cached the model. + _ = AutoModel.from_pretrained("hf-internal-testing/tiny-random-bert") + with RequestCounter() as counter: + _ = AutoModel.from_pretrained("hf-internal-testing/tiny-random-bert") + self.assertEqual(counter.get_request_count, 0) + self.assertEqual(counter.head_request_count, 1) + self.assertEqual(counter.other_request_count, 0) + + # With a sharded checkpoint + _ = AutoModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded") + with RequestCounter() as counter: + _ = AutoModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded") + self.assertEqual(counter.get_request_count, 0) + # There is no pytorch_model.bin so we still get one call for this one. + self.assertEqual(counter.head_request_count, 2) + self.assertEqual(counter.other_request_count, 0) diff --git a/tests/models/auto/test_modeling_tf_auto.py b/tests/models/auto/test_modeling_tf_auto.py index a803a3451107e2..bbde4f582bdfb0 100644 --- a/tests/models/auto/test_modeling_tf_auto.py +++ b/tests/models/auto/test_modeling_tf_auto.py @@ -21,6 +21,7 @@ from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, + RequestCounter, require_tensorflow_probability, require_tf, slow, @@ -287,3 +288,21 @@ def test_model_file_not_found(self): def test_model_from_pt_suggestion(self): with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"): _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only") + + def test_cached_model_has_minimum_calls_to_head(self): + # Make sure we have cached the model. + _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert") + with RequestCounter() as counter: + _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert") + self.assertEqual(counter.get_request_count, 0) + self.assertEqual(counter.head_request_count, 1) + self.assertEqual(counter.other_request_count, 0) + + # With a sharded checkpoint + _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded") + with RequestCounter() as counter: + _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded") + self.assertEqual(counter.get_request_count, 0) + # There is no pytorch_model.bin so we still get one call for this one. + self.assertEqual(counter.head_request_count, 2) + self.assertEqual(counter.other_request_count, 0) diff --git a/tests/models/auto/test_tokenization_auto.py b/tests/models/auto/test_tokenization_auto.py index 1e1abb9245842c..830362e29cd654 100644 --- a/tests/models/auto/test_tokenization_auto.py +++ b/tests/models/auto/test_tokenization_auto.py @@ -48,6 +48,7 @@ DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, + RequestCounter, require_tokenizers, slow, ) @@ -213,6 +214,7 @@ def test_auto_tokenizer_fast_no_slow(self): def test_get_tokenizer_config(self): # Check we can load the tokenizer config of an online model. config = get_tokenizer_config("bert-base-cased") + _ = config.pop("_commit_hash", None) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(config, {"do_lower_case": False}) @@ -340,3 +342,13 @@ def test_revision_not_found(self): EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa") + + def test_cached_tokenizer_has_minimum_calls_to_head(self): + # Make sure we have cached the tokenizer. + _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") + with RequestCounter() as counter: + _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") + self.assertEqual(counter.get_request_count, 0) + # We still have one extra call because the model does not have a added_tokens.json file + self.assertEqual(counter.head_request_count, 2) + self.assertEqual(counter.other_request_count, 0) diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py index 5d5c8fa2333eb6..5e0296c7136725 100644 --- a/tests/pipelines/test_pipelines_common.py +++ b/tests/pipelines/test_pipelines_common.py @@ -49,6 +49,7 @@ TOKEN, USER, CaptureLogger, + RequestCounter, is_pipeline_test, is_staging_test, nested_simplify, @@ -877,6 +878,16 @@ def test_dynamic_pipeline(self): [{"label": "LABEL_0", "score": 0.505}], ) + def test_cached_pipeline_has_minimum_calls_to_head(self): + # Make sure we have cached the pipeline. + _ = pipeline("text-classification", model="hf-internal-testing/tiny-random-bert") + with RequestCounter() as counter: + _ = pipeline("text-classification", model="hf-internal-testing/tiny-random-bert") + self.assertEqual(counter.get_request_count, 0) + # We still have one extra call because the model does not have a added_tokens.json file + self.assertEqual(counter.head_request_count, 2) + self.assertEqual(counter.other_request_count, 0) + @require_torch @is_staging_test diff --git a/tests/test_configuration_common.py b/tests/test_configuration_common.py index 397346c7deec77..5447fb6afb70eb 100644 --- a/tests/test_configuration_common.py +++ b/tests/test_configuration_common.py @@ -246,7 +246,7 @@ def test_push_to_hub(self): config.push_to_hub("test-config", use_auth_token=self._token) new_config = BertConfig.from_pretrained(f"{USER}/test-config") - for k, v in config.__dict__.items(): + for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) @@ -258,7 +258,7 @@ def test_push_to_hub(self): config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token) new_config = BertConfig.from_pretrained(f"{USER}/test-config") - for k, v in config.__dict__.items(): + for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) @@ -269,7 +269,7 @@ def test_push_to_hub_in_organization(self): config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token) new_config = BertConfig.from_pretrained("valid_org/test-config-org") - for k, v in config.__dict__.items(): + for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) @@ -283,7 +283,7 @@ def test_push_to_hub_in_organization(self): ) new_config = BertConfig.from_pretrained("valid_org/test-config-org") - for k, v in config.__dict__.items(): + for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(v, getattr(new_config, k)) @@ -323,7 +323,9 @@ def test_config_common_kwargs_is_complete(self): base_config = PretrainedConfig() missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. - self.assertListEqual(missing_keys, ["is_encoder_decoder", "_name_or_path", "transformers_version"]) + self.assertListEqual( + missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] + ) keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)] if len(keys_with_defaults) > 0: raise ValueError( From 9d4a45509ab352c5a3e1113283cbae607133c0e8 Mon Sep 17 00:00:00 2001 From: Julien Chaumond Date: Wed, 10 Aug 2022 18:52:15 +0200 Subject: [PATCH 066/539] `pipeline` support for `device="mps"` (or any other string) (#18494) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * `pipeline` support for `device="mps"` (or any other string) * Simplify `if` nesting * Update src/transformers/pipelines/base.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Fix? @sgugger * passing `attr=None` is not the same as not passing `attr` 🤯 Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> --- src/transformers/pipelines/__init__.py | 7 +++++++ src/transformers/pipelines/base.py | 19 +++++++++++++------ 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/src/transformers/pipelines/__init__.py b/src/transformers/pipelines/__init__.py index 5752790aa9614b..74f6e796801c7e 100755 --- a/src/transformers/pipelines/__init__.py +++ b/src/transformers/pipelines/__init__.py @@ -422,6 +422,7 @@ def pipeline( revision: Optional[str] = None, use_fast: bool = True, use_auth_token: Optional[Union[str, bool]] = None, + device: Optional[Union[int, str, "torch.device"]] = None, device_map=None, torch_dtype=None, trust_remote_code: Optional[bool] = None, @@ -508,6 +509,9 @@ def pipeline( use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). + device (`int` or `str` or `torch.device`): + Defines the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank like `1`) on which this + pipeline will be allocated. device_map (`str` or `Dict[str, Union[int, str, torch.device]`, *optional*): Sent directly as `model_kwargs` (just a simpler shortcut). When `accelerate` library is present, set `device_map="auto"` to compute the most optimized `device_map` automatically. [More @@ -811,4 +815,7 @@ def pipeline( if feature_extractor is not None: kwargs["feature_extractor"] = feature_extractor + if device is not None: + kwargs["device"] = device + return pipeline_class(model=model, framework=framework, task=task, **kwargs) diff --git a/src/transformers/pipelines/base.py b/src/transformers/pipelines/base.py index 6e2c28e5ddf84d..a0ce06ec5e33f1 100644 --- a/src/transformers/pipelines/base.py +++ b/src/transformers/pipelines/base.py @@ -704,7 +704,7 @@ def predict(self, X): Reference to the object in charge of parsing supplied pipeline parameters. device (`int`, *optional*, defaults to -1): Device ordinal for CPU/GPU supports. Setting this to -1 will leverage CPU, a positive will run the model on - the associated CUDA device id. You can pass native `torch.device` too. + the associated CUDA device id. You can pass native `torch.device` or a `str` too. binary_output (`bool`, *optional*, defaults to `False`): Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text. """ @@ -747,7 +747,7 @@ def __init__( framework: Optional[str] = None, task: str = "", args_parser: ArgumentHandler = None, - device: int = -1, + device: Union[int, str, "torch.device"] = -1, binary_output: bool = False, **kwargs, ): @@ -760,14 +760,21 @@ def __init__( self.feature_extractor = feature_extractor self.modelcard = modelcard self.framework = framework - if is_torch_available() and isinstance(device, torch.device): - self.device = device + if is_torch_available() and self.framework == "pt": + if isinstance(device, torch.device): + self.device = device + elif isinstance(device, str): + self.device = torch.device(device) + elif device < 0: + self.device = torch.device("cpu") + else: + self.device = torch.device("cuda:{device}") else: - self.device = device if framework == "tf" else torch.device("cpu" if device < 0 else f"cuda:{device}") + self.device = device self.binary_output = binary_output # Special handling - if self.framework == "pt" and self.device.type == "cuda": + if self.framework == "pt" and self.device.type != "cpu": self.model = self.model.to(self.device) # Update config with task specific parameters From 6936e7c4875cab371d4d13e5c8e27a6b53276f0e Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Wed, 10 Aug 2022 11:20:39 -0700 Subject: [PATCH 067/539] Update philosophy to include other preprocessing classes (#18550) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * 📝 update philosophy to include other preprocessing classes * 🖍 apply feedbacks --- docs/source/en/philosophy.mdx | 53 ++++++++++++++++------------------- 1 file changed, 24 insertions(+), 29 deletions(-) diff --git a/docs/source/en/philosophy.mdx b/docs/source/en/philosophy.mdx index 13134c31d4a6b9..1aca1accab9304 100644 --- a/docs/source/en/philosophy.mdx +++ b/docs/source/en/philosophy.mdx @@ -14,29 +14,28 @@ specific language governing permissions and limitations under the License. 🤗 Transformers is an opinionated library built for: -- NLP researchers and educators seeking to use/study/extend large-scale transformers models -- hands-on practitioners who want to fine-tune those models and/or serve them in production -- engineers who just want to download a pretrained model and use it to solve a given NLP task. +- machine learning researchers and educators seeking to use, study or extend large-scale Transformers models. +- hands-on practitioners who want to fine-tune those models or serve them in production, or both. +- engineers who just want to download a pretrained model and use it to solve a given machine learning task. The library was designed with two strong goals in mind: -- Be as easy and fast to use as possible: +1. Be as easy and fast to use as possible: - We strongly limited the number of user-facing abstractions to learn, in fact, there are almost no abstractions, just three standard classes required to use each model: [configuration](main_classes/configuration), - [models](main_classes/model) and [tokenizer](main_classes/tokenizer). + [models](main_classes/model), and a preprocessing class ([tokenizer](main_classes/tokenizer) for NLP, [feature extractor](main_classes/feature_extractor) for vision and audio, and [processor](main_classes/processors) for multimodal inputs). - All of these classes can be initialized in a simple and unified way from pretrained instances by using a common - `from_pretrained()` instantiation method which will take care of downloading (if needed), caching and - loading the related class instance and associated data (configurations' hyper-parameters, tokenizers' vocabulary, + `from_pretrained()` method which downloads (if needed), caches and + loads the related class instance and associated data (configurations' hyperparameters, tokenizers' vocabulary, and models' weights) from a pretrained checkpoint provided on [Hugging Face Hub](https://huggingface.co/models) or your own saved checkpoint. - On top of those three base classes, the library provides two APIs: [`pipeline`] for quickly - using a model (plus its associated tokenizer and configuration) on a given task and - [`Trainer`]/`Keras.fit` to quickly train or fine-tune a given model. + using a model for inference on a given task and [`Trainer`] to quickly train or fine-tune a PyTorch model (all TensorFlow models are compatible with `Keras.fit`). - As a consequence, this library is NOT a modular toolbox of building blocks for neural nets. If you want to - extend/build-upon the library, just use regular Python/PyTorch/TensorFlow/Keras modules and inherit from the base - classes of the library to reuse functionalities like model loading/saving. + extend or build upon the library, just use regular Python, PyTorch, TensorFlow, Keras modules and inherit from the base + classes of the library to reuse functionalities like model loading and saving. If you'd like to learn more about our coding philosophy for models, check out our [Repeat Yourself](https://huggingface.co/blog/transformers-design-philosophy) blog post. -- Provide state-of-the-art models with performances as close as possible to the original models: +2. Provide state-of-the-art models with performances as close as possible to the original models: - We provide at least one example for each architecture which reproduces a result provided by the official authors of said architecture. @@ -48,33 +47,29 @@ A few other goals: - Expose the models' internals as consistently as possible: - We give access, using a single API, to the full hidden-states and attention weights. - - Tokenizer and base model's API are standardized to easily switch between models. + - The preprocessing classes and base model APIs are standardized to easily switch between models. -- Incorporate a subjective selection of promising tools for fine-tuning/investigating these models: +- Incorporate a subjective selection of promising tools for fine-tuning and investigating these models: - - A simple/consistent way to add new tokens to the vocabulary and embeddings for fine-tuning. - - Simple ways to mask and prune transformer heads. + - A simple and consistent way to add new tokens to the vocabulary and embeddings for fine-tuning. + - Simple ways to mask and prune Transformer heads. -- Switch easily between PyTorch and TensorFlow 2.0, allowing training using one framework and inference using another. +- Easily switch between PyTorch, TensorFlow 2.0 and Flax, allowing training with one framework and inference with another. ## Main concepts The library is built around three types of classes for each model: -- **Model classes** such as [`BertModel`], which are 30+ PyTorch models ([torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)) or Keras models ([tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)) that work with the pretrained weights provided in the - library. -- **Configuration classes** such as [`BertConfig`], which store all the parameters required to build - a model. You don't always need to instantiate these yourself. In particular, if you are using a pretrained model - without any modification, creating the model will automatically take care of instantiating the configuration (which - is part of the model). -- **Tokenizer classes** such as [`BertTokenizer`], which store the vocabulary for each model and - provide methods for encoding/decoding strings in a list of token embeddings indices to be fed to a model. +- **Model classes** can be PyTorch models ([torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)), Keras models ([tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)) or JAX/Flax models ([flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen.html)) that work with the pretrained weights provided in the library. +- **Configuration classes** store the hyperparameters required to build a model (such as the number of layers and hidden size). You don't always need to instantiate these yourself. In particular, if you are using a pretrained model without any modification, creating the model will automatically take care of instantiating the configuration (which is part of the model). +- **Preprocessing classes** convert the raw data into a format accepted by the model. A [tokenizer](main_classes/tokenizer) stores the vocabulary for each model and provide methods for encoding and decoding strings in a list of token embedding indices to be fed to a model. [Feature extractors](main_classes/feature_extractor) preprocess audio or vision inputs, and a [processor](main_classes/processors) handles multimodal inputs. -All these classes can be instantiated from pretrained instances and saved locally using two methods: +All these classes can be instantiated from pretrained instances, saved locally, and shared on the Hub with three methods: -- `from_pretrained()` lets you instantiate a model/configuration/tokenizer from a pretrained version either +- `from_pretrained()` lets you instantiate a model, configuration, and preprocessing class from a pretrained version either provided by the library itself (the supported models can be found on the [Model Hub](https://huggingface.co/models)) or - stored locally (or on a server) by the user, -- `save_pretrained()` lets you save a model/configuration/tokenizer locally so that it can be reloaded using + stored locally (or on a server) by the user. +- `save_pretrained()` lets you save a model, configuration, and preprocessing class locally so that it can be reloaded using `from_pretrained()`. +- `push_to_hub()` lets you share a model, configuration, and a preprocessing class to the Hub, so it is easily accessible to everyone. From 50949fab74d7d40ad4057f694ce843dae5349d60 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Wed, 10 Aug 2022 15:46:03 -0400 Subject: [PATCH 068/539] Properly move cache when it is not in default path (#18563) --- src/transformers/utils/hub.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py index 00f9c277c41773..163ad64ffa173b 100644 --- a/src/transformers/utils/hub.py +++ b/src/transformers/utils/hub.py @@ -1070,7 +1070,11 @@ def move_cache(cache_dir=None, new_cache_dir=None, token=None): "`transformers.utils.move_cache()`." ) try: - move_cache() + if TRANSFORMERS_CACHE != default_cache_path: + # Users set some env variable to customize cache storage + move_cache(TRANSFORMERS_CACHE, TRANSFORMERS_CACHE) + else: + move_cache() except Exception as e: trace = "\n".join(traceback.format_tb(e.__traceback__)) logger.error( From f62cb8313c2d7051e38f845823c1f4a7307aac3e Mon Sep 17 00:00:00 2001 From: Dhruv Karan Date: Thu, 11 Aug 2022 01:17:31 +0530 Subject: [PATCH 069/539] Adds CLIP to models exportable with ONNX (#18515) * onnx config for clip * default opset as 14 * changes from the original repo * input values order fix * outputs fix * remove unused import * ran make fix-copies * black format * review comments: forward ref, import fix, model change revert, .to cleanup * make style * formatting fixes * revert groupvit * comment for cast to int32 * comment fix * make .T as .t() for onnx conversion * ran make fix-copies * remove unneeded comment Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * fix copies * remove comment Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> --- docs/source/en/serialization.mdx | 1 + src/transformers/models/clip/__init__.py | 16 +++++- .../models/clip/configuration_clip.py | 50 ++++++++++++++++++- src/transformers/models/clip/modeling_clip.py | 9 ++-- .../models/groupvit/modeling_groupvit.py | 7 ++- .../models/owlvit/modeling_owlvit.py | 2 +- .../modeling_vision_text_dual_encoder.py | 2 +- src/transformers/onnx/features.py | 4 ++ tests/onnx/test_onnx_v2.py | 1 + 9 files changed, 82 insertions(+), 10 deletions(-) diff --git a/docs/source/en/serialization.mdx b/docs/source/en/serialization.mdx index 9561bbd8ec77c1..0aacdf76f7ef0f 100644 --- a/docs/source/en/serialization.mdx +++ b/docs/source/en/serialization.mdx @@ -55,6 +55,7 @@ Ready-made configurations include the following architectures: - BlenderbotSmall - BLOOM - CamemBERT +- CLIP - CodeGen - ConvBERT - ConvNeXT diff --git a/src/transformers/models/clip/__init__.py b/src/transformers/models/clip/__init__.py index 6a6e64c995d385..932130f8d5fdf9 100644 --- a/src/transformers/models/clip/__init__.py +++ b/src/transformers/models/clip/__init__.py @@ -29,7 +29,13 @@ _import_structure = { - "configuration_clip": ["CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "CLIPConfig", "CLIPTextConfig", "CLIPVisionConfig"], + "configuration_clip": [ + "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", + "CLIPConfig", + "CLIPOnnxConfig", + "CLIPTextConfig", + "CLIPVisionConfig", + ], "tokenization_clip": ["CLIPTokenizer"], } @@ -95,7 +101,13 @@ if TYPE_CHECKING: - from .configuration_clip import CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPTextConfig, CLIPVisionConfig + from .configuration_clip import ( + CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, + CLIPConfig, + CLIPOnnxConfig, + CLIPTextConfig, + CLIPVisionConfig, + ) from .tokenization_clip import CLIPTokenizer try: diff --git a/src/transformers/models/clip/configuration_clip.py b/src/transformers/models/clip/configuration_clip.py index 3bb22b74a4c77c..a118b179e4c09f 100644 --- a/src/transformers/models/clip/configuration_clip.py +++ b/src/transformers/models/clip/configuration_clip.py @@ -16,9 +16,16 @@ import copy import os -from typing import Union +from collections import OrderedDict +from typing import TYPE_CHECKING, Any, Mapping, Optional, Union + + +if TYPE_CHECKING: + from ...processing_utils import ProcessorMixin + from ...utils import TensorType from ...configuration_utils import PretrainedConfig +from ...onnx import OnnxConfig from ...utils import logging @@ -317,3 +324,44 @@ def to_dict(self): output["vision_config"] = self.vision_config.to_dict() output["model_type"] = self.__class__.model_type return output + + +class CLIPOnnxConfig(OnnxConfig): + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + return OrderedDict( + [ + ("input_ids", {0: "batch", 1: "sequence"}), + ("pixel_values", {0: "batch"}), + ("attention_mask", {0: "batch", 1: "sequence"}), + ] + ) + + @property + def outputs(self) -> Mapping[str, Mapping[int, str]]: + return OrderedDict( + [ + ("logits_per_image", {0: "batch"}), + ("logits_per_text", {0: "batch"}), + ("text_embeds", {0: "batch"}), + ("image_embeds", {0: "batch"}), + ] + ) + + @property + def atol_for_validation(self) -> float: + return 1e-4 + + def generate_dummy_inputs( + self, + processor: "ProcessorMixin", + framework: Optional["TensorType"] = None, + ) -> Mapping[str, Any]: + + text_input_dict = super().generate_dummy_inputs(processor.tokenizer, framework=framework) + image_input_dict = super().generate_dummy_inputs(processor.feature_extractor, framework=framework) + return {**text_input_dict, **image_input_dict} + + @property + def default_onnx_opset(self) -> int: + return 14 diff --git a/src/transformers/models/clip/modeling_clip.py b/src/transformers/models/clip/modeling_clip.py index ddc2236371c29a..799d0ef0462afc 100755 --- a/src/transformers/models/clip/modeling_clip.py +++ b/src/transformers/models/clip/modeling_clip.py @@ -68,7 +68,7 @@ def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: def clip_loss(similarity: torch.Tensor) -> torch.Tensor: caption_loss = contrastive_loss(similarity) - image_loss = contrastive_loss(similarity.T) + image_loss = contrastive_loss(similarity.t()) return (caption_loss + image_loss) / 2.0 @@ -660,7 +660,10 @@ def forward( # text_embeds.shape = [batch_size, sequence_length, transformer.width] # take features from the eot embedding (eot_token is the highest number in each sequence) - pooled_output = last_hidden_state[torch.arange(last_hidden_state.shape[0]), input_ids.argmax(dim=-1)] + # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14 + pooled_output = last_hidden_state[ + torch.arange(last_hidden_state.shape[0]), input_ids.to(torch.int).argmax(dim=-1) + ] if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] @@ -1050,7 +1053,7 @@ def forward( # cosine similarity as logits logit_scale = self.logit_scale.exp() logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale - logits_per_image = logits_per_text.T + logits_per_image = logits_per_text.t() loss = None if return_loss: diff --git a/src/transformers/models/groupvit/modeling_groupvit.py b/src/transformers/models/groupvit/modeling_groupvit.py index 1073d4bfea8708..9817065ab37a55 100644 --- a/src/transformers/models/groupvit/modeling_groupvit.py +++ b/src/transformers/models/groupvit/modeling_groupvit.py @@ -72,7 +72,7 @@ def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: # Copied from transformers.models.clip.modeling_clip.clip_loss with clip->groupvit def groupvit_loss(similarity: torch.Tensor) -> torch.Tensor: caption_loss = contrastive_loss(similarity) - image_loss = contrastive_loss(similarity.T) + image_loss = contrastive_loss(similarity.t()) return (caption_loss + image_loss) / 2.0 @@ -1132,7 +1132,10 @@ def forward( # text_embeds.shape = [batch_size, sequence_length, transformer.width] # take features from the eot embedding (eot_token is the highest number in each sequence) - pooled_output = last_hidden_state[torch.arange(last_hidden_state.shape[0]), input_ids.argmax(dim=-1)] + # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14 + pooled_output = last_hidden_state[ + torch.arange(last_hidden_state.shape[0]), input_ids.to(torch.int).argmax(dim=-1) + ] if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] diff --git a/src/transformers/models/owlvit/modeling_owlvit.py b/src/transformers/models/owlvit/modeling_owlvit.py index 35ebd16cf25bd8..73ee2597f1b163 100644 --- a/src/transformers/models/owlvit/modeling_owlvit.py +++ b/src/transformers/models/owlvit/modeling_owlvit.py @@ -71,7 +71,7 @@ def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: # Copied from transformers.models.clip.modeling_clip.clip_loss with clip->owlvit def owlvit_loss(similarity: torch.Tensor) -> torch.Tensor: caption_loss = contrastive_loss(similarity) - image_loss = contrastive_loss(similarity.T) + image_loss = contrastive_loss(similarity.t()) return (caption_loss + image_loss) / 2.0 diff --git a/src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py b/src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py index 66340deaf4927f..64fd2f405d5084 100755 --- a/src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py +++ b/src/transformers/models/vision_text_dual_encoder/modeling_vision_text_dual_encoder.py @@ -154,7 +154,7 @@ def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: # Copied from transformers.models.clip.modeling_clip.clip_loss def clip_loss(similarity: torch.Tensor) -> torch.Tensor: caption_loss = contrastive_loss(similarity) - image_loss = contrastive_loss(similarity.T) + image_loss = contrastive_loss(similarity.t()) return (caption_loss + image_loss) / 2.0 diff --git a/src/transformers/onnx/features.py b/src/transformers/onnx/features.py index 8d8b8190e46819..fbfeb47250e73f 100644 --- a/src/transformers/onnx/features.py +++ b/src/transformers/onnx/features.py @@ -201,6 +201,10 @@ class FeaturesManager: "question-answering", onnx_config_cls="models.camembert.CamembertOnnxConfig", ), + "clip": supported_features_mapping( + "default", + onnx_config_cls="models.clip.CLIPOnnxConfig", + ), "codegen": supported_features_mapping( "default", "causal-lm", diff --git a/tests/onnx/test_onnx_v2.py b/tests/onnx/test_onnx_v2.py index 98ab0fad131e01..5634abc7706856 100644 --- a/tests/onnx/test_onnx_v2.py +++ b/tests/onnx/test_onnx_v2.py @@ -185,6 +185,7 @@ def test_values_override(self): ("big-bird", "google/bigbird-roberta-base"), ("ibert", "kssteven/ibert-roberta-base"), ("camembert", "camembert-base"), + ("clip", "openai/clip-vit-base-patch32"), ("convbert", "YituTech/conv-bert-base"), ("codegen", "Salesforce/codegen-350M-multi"), ("deberta", "microsoft/deberta-base"), From 9a9a525be8310a374b6543f7ddaa4c48c9893828 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Wed, 10 Aug 2022 22:41:58 +0200 Subject: [PATCH 070/539] raise atol for MT5OnnxConfig (#18560) Co-authored-by: ydshieh --- src/transformers/models/mt5/configuration_mt5.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/transformers/models/mt5/configuration_mt5.py b/src/transformers/models/mt5/configuration_mt5.py index 3e72831ad25fbc..d9232c94629db2 100644 --- a/src/transformers/models/mt5/configuration_mt5.py +++ b/src/transformers/models/mt5/configuration_mt5.py @@ -147,9 +147,9 @@ def num_hidden_layers(self): return self.num_layers -# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig class MT5OnnxConfig(OnnxSeq2SeqConfigWithPast): @property + # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs def inputs(self) -> Mapping[str, Mapping[int, str]]: common_inputs = { "input_ids": {0: "batch", 1: "encoder_sequence"}, @@ -169,5 +169,10 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: return common_inputs @property + # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset def default_onnx_opset(self) -> int: return 13 + + @property + def atol_for_validation(self) -> float: + return 5e-4 From 051311ff66e7b23bfcfc42bc514c969517323ce9 Mon Sep 17 00:00:00 2001 From: Michael Wyatt Date: Wed, 10 Aug 2022 15:28:19 -0700 Subject: [PATCH 071/539] fix string (#18568) --- src/transformers/pipelines/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/pipelines/base.py b/src/transformers/pipelines/base.py index a0ce06ec5e33f1..7842b95b32859c 100644 --- a/src/transformers/pipelines/base.py +++ b/src/transformers/pipelines/base.py @@ -768,7 +768,7 @@ def __init__( elif device < 0: self.device = torch.device("cpu") else: - self.device = torch.device("cuda:{device}") + self.device = torch.device(f"cuda:{device}") else: self.device = device self.binary_output = binary_output From 76568d24b67d50ca82192fa53e8b9ebb3dd27b42 Mon Sep 17 00:00:00 2001 From: Maxime G Date: Thu, 11 Aug 2022 10:59:37 +0200 Subject: [PATCH 072/539] Segformer TF: fix output size in documentation (#18572) * Segformer TF: fix output size in doc * Segformer pytorch: fix output size in doc Co-authored-by: Maxime Gardoni --- src/transformers/models/segformer/modeling_segformer.py | 2 +- src/transformers/models/segformer/modeling_tf_segformer.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/transformers/models/segformer/modeling_segformer.py b/src/transformers/models/segformer/modeling_segformer.py index b8be4cdb70a6d0..4e70eb957acf3f 100755 --- a/src/transformers/models/segformer/modeling_segformer.py +++ b/src/transformers/models/segformer/modeling_segformer.py @@ -784,7 +784,7 @@ def forward( >>> inputs = feature_extractor(images=image, return_tensors="pt") >>> outputs = model(**inputs) - >>> logits = outputs.logits # shape (batch_size, num_labels, height, width) + >>> logits = outputs.logits # shape (batch_size, num_labels, height/4, width/4) >>> list(logits.shape) [1, 150, 128, 128] ```""" diff --git a/src/transformers/models/segformer/modeling_tf_segformer.py b/src/transformers/models/segformer/modeling_tf_segformer.py index c2f4b2ff0c7cd8..2ff256d78d2b1d 100644 --- a/src/transformers/models/segformer/modeling_tf_segformer.py +++ b/src/transformers/models/segformer/modeling_tf_segformer.py @@ -847,7 +847,7 @@ def call( >>> inputs = feature_extractor(images=image, return_tensors="tf") >>> outputs = model(**inputs, training=False) - >>> # logits are of shape (batch_size, num_labels, height, width) + >>> # logits are of shape (batch_size, num_labels, height/4, width/4) >>> logits = outputs.logits >>> list(logits.shape) [1, 150, 128, 128] From f762f373cc701c25e6f2f71c8aaa45ee3af75910 Mon Sep 17 00:00:00 2001 From: Alara Dirik <8944735+alaradirik@users.noreply.github.com> Date: Thu, 11 Aug 2022 15:44:23 +0300 Subject: [PATCH 073/539] Fix resizing bug in OWL-ViT (#18573) * Fixes resizing bug in OWL-ViT * Defaults to square resize if size is set to an int * Sets do_center_crop default value to False --- .../models/owlvit/feature_extraction_owlvit.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/transformers/models/owlvit/feature_extraction_owlvit.py b/src/transformers/models/owlvit/feature_extraction_owlvit.py index 1e4bc735608a35..f8a45706835d8f 100644 --- a/src/transformers/models/owlvit/feature_extraction_owlvit.py +++ b/src/transformers/models/owlvit/feature_extraction_owlvit.py @@ -50,13 +50,15 @@ class OwlViTFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the shorter edge of the input to a certain `size`. - size (`int`, *optional*, defaults to 768): - Resize the shorter edge of the input to the given size. Only has an effect if `do_resize` is set to `True`. + size (`int` or `Tuple[int, int]`, *optional*, defaults to (768, 768)): + The size to use for resizing the image. Only has an effect if `do_resize` is set to `True`. If `size` is a + sequence like (h, w), output size will be matched to this. If `size` is an int, then image will be resized + to (size, size). resample (`int`, *optional*, defaults to `PIL.Image.BICUBIC`): An optional resampling filter. This can be one of `PIL.Image.NEAREST`, `PIL.Image.BOX`, `PIL.Image.BILINEAR`, `PIL.Image.HAMMING`, `PIL.Image.BICUBIC` or `PIL.Image.LANCZOS`. Only has an effect if `do_resize` is set to `True`. - do_center_crop (`bool`, *optional*, defaults to `True`): + do_center_crop (`bool`, *optional*, defaults to `False`): Whether to crop the input at the center. If the input size is smaller than `crop_size` along any edge, the image is padded with 0's and then center cropped. crop_size (`int`, *optional*, defaults to 768): @@ -74,10 +76,10 @@ class OwlViTFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin def __init__( self, do_resize=True, - size=768, + size=(768, 768), resample=Image.BICUBIC, crop_size=768, - do_center_crop=True, + do_center_crop=False, do_normalize=True, image_mean=None, image_std=None, @@ -195,7 +197,7 @@ def __call__( # transformations (resizing + center cropping + normalization) if self.do_resize and self.size is not None and self.resample is not None: images = [ - self.resize(image=image, size=self.size, resample=self.resample, default_to_square=False) + self.resize(image=image, size=self.size, resample=self.resample, default_to_square=True) for image in images ] if self.do_center_crop and self.crop_size is not None: From 4c8ec66a7433589436d13d95d48601f274c92b44 Mon Sep 17 00:00:00 2001 From: "Wonseok Lee (Jack)" Date: Thu, 11 Aug 2022 21:51:39 +0900 Subject: [PATCH 074/539] Fix LayoutLMv3 documentation (#17932) * fix typos * fix sequence_length docs of LayoutLMv3Model * delete trailing white spaces * fix layoutlmv3 docs more * apply make fixup & quality * change to two versions of input docstring * apply make fixup & quality --- .../models/layoutlmv3/modeling_layoutlmv3.py | 112 ++++++++++++++++-- 1 file changed, 99 insertions(+), 13 deletions(-) diff --git a/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py b/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py index f3bdd2cd8d9067..68987e38e9942e 100644 --- a/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/modeling_layoutlmv3.py @@ -54,17 +54,93 @@ behavior. Parameters: - config ([`LayoutLMv2Config`]): Model configuration class with all the parameters of the model. + config ([`LayoutLMv3Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ -LAYOUTLMV3_INPUTS_DOCSTRING = r""" +LAYOUTLMV3_MODEL_INPUTS_DOCSTRING = r""" Args: - input_ids (`torch.LongTensor` of shape `{0}`): + input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. - Indices can be obtained using [`LayoutLMv2Tokenizer`]. See [`PreTrainedTokenizer.encode`] and + Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] + token. See `pixel_values` for `patch_sequence_length`. + + Indices can be obtained using [`LayoutLMv3Tokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + + bbox (`torch.LongTensor` of shape `({0}, 4)`, *optional*): + Bounding boxes of each input sequence tokens. Selected in the range `[0, + config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1) + format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, + y1) represents the position of the lower right corner. + + Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] + token. See `pixel_values` for `patch_sequence_length`. + + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Batch of document images. Each image is divided into patches of shape `(num_channels, config.patch_size, + config.patch_size)` and the total number of patches (=`patch_sequence_length`) equals to `((height / + config.patch_size) * (width / config.patch_size))`. + + attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] + token. See `pixel_values` for `patch_sequence_length`. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] + token. See `pixel_values` for `patch_sequence_length`. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] + token. See `pixel_values` for `patch_sequence_length`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert *input_ids* indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +LAYOUTLMV3_DOWNSTREAM_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`LayoutLMv3Tokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) @@ -76,16 +152,18 @@ y1) represents the position of the lower right corner. pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): - Batch of document images. + Batch of document images. Each image is divided into patches of shape `(num_channels, config.patch_size, + config.patch_size)` and the total number of patches (=`patch_sequence_length`) equals to `((height / + config.patch_size) * (width / config.patch_size))`. - attention_mask (`torch.FloatTensor` of shape `{0}`, *optional*): + attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) - token_type_ids (`torch.LongTensor` of shape `{0}`, *optional*): + token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: @@ -93,7 +171,7 @@ - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) - position_ids (`torch.LongTensor` of shape `{0}`, *optional*): + position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. @@ -104,7 +182,7 @@ - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. - inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. @@ -763,7 +841,9 @@ def forward_image(self, pixel_values): return embeddings - @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING.format("(batch_size, sequence_length)")) + @add_start_docstrings_to_model_forward( + LAYOUTLMV3_MODEL_INPUTS_DOCSTRING.format("batch_size, token_sequence_length") + ) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, @@ -975,7 +1055,9 @@ def __init__(self, config): self.init_weights() - @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_start_docstrings_to_model_forward( + LAYOUTLMV3_DOWNSTREAM_INPUTS_DOCSTRING.format("batch_size, sequence_length") + ) @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, @@ -1084,7 +1166,9 @@ def __init__(self, config): self.init_weights() - @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_start_docstrings_to_model_forward( + LAYOUTLMV3_DOWNSTREAM_INPUTS_DOCSTRING.format("batch_size, sequence_length") + ) @replace_return_docstrings(output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, @@ -1214,7 +1298,9 @@ def __init__(self, config): self.init_weights() - @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_start_docstrings_to_model_forward( + LAYOUTLMV3_DOWNSTREAM_INPUTS_DOCSTRING.format("batch_size, sequence_length") + ) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, From 3f0707b2fe78bc690793783a63a37b31d124fe90 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger Date: Thu, 11 Aug 2022 09:33:41 -0400 Subject: [PATCH 075/539] Skip broken tests --- tests/models/owlvit/test_modeling_owlvit.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/models/owlvit/test_modeling_owlvit.py b/tests/models/owlvit/test_modeling_owlvit.py index edddc53beeab88..7564d192ad9898 100644 --- a/tests/models/owlvit/test_modeling_owlvit.py +++ b/tests/models/owlvit/test_modeling_owlvit.py @@ -733,8 +733,9 @@ def prepare_img(): @require_vision @require_torch +@unittest.skip("These tests are broken, fix me Alara") class OwlViTModelIntegrationTest(unittest.TestCase): - # @slow + @slow def test_inference(self): model_name = "google/owlvit-base-patch32" model = OwlViTModel.from_pretrained(model_name).to(torch_device) From 80468251bc3771d53427f77aa2dc9d49a55d2bf0 Mon Sep 17 00:00:00 2001 From: Dan Jones Date: Thu, 11 Aug 2022 14:45:04 +0100 Subject: [PATCH 076/539] Change BartLearnedPositionalEmbedding's forward method signature to support Opacus training (#18486) * changing BartLearnedPositionalEmbedding forward signature and references to it * removing debugging dead code (thanks style checker) * blackened modeling_bart file * removing copy inconsistencies via make fix-copies * changing references to copied signatures in Bart variants * make fix-copies once more * using expand over repeat (thanks @michaelbenayoun) * expand instead of repeat for all model copies Co-authored-by: Daniel Jones --- src/transformers/models/bart/modeling_bart.py | 26 +++++++++++-------- .../models/mbart/modeling_mbart.py | 23 +++++++++------- src/transformers/models/mvp/modeling_mvp.py | 22 ++++++++++------ .../models/plbart/modeling_plbart.py | 26 +++++++++++-------- .../models/trocr/modeling_trocr.py | 19 +++++++++----- 5 files changed, 70 insertions(+), 46 deletions(-) diff --git a/src/transformers/models/bart/modeling_bart.py b/src/transformers/models/bart/modeling_bart.py index 8411cc6cefefed..525da6f34b06cf 100755 --- a/src/transformers/models/bart/modeling_bart.py +++ b/src/transformers/models/bart/modeling_bart.py @@ -128,12 +128,14 @@ def __init__(self, num_embeddings: int, embedding_dim: int): self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim) - def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0): - """`input_ids_shape` is expected to be [bsz x seqlen].""" - bsz, seq_len = input_ids_shape[:2] + def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0): + """`input_ids' shape is expected to be [bsz x seqlen].""" + + bsz, seq_len = input_ids.shape[:2] positions = torch.arange( past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device - ) + ).expand(bsz, -1) + return super().forward(positions + self.offset) @@ -788,17 +790,17 @@ def forward( if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: - input_shape = input_ids.size() - input_ids = input_ids.view(-1, input_shape[-1]) + input = input_ids + input_ids = input_ids.view(-1, input_ids.shape[-1]) elif inputs_embeds is not None: - input_shape = inputs_embeds.size()[:-1] + input = inputs_embeds[:, :, -1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale - embed_pos = self.embed_positions(input_shape) + embed_pos = self.embed_positions(input) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) @@ -1015,10 +1017,12 @@ def forward( if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: - input_shape = input_ids.size() + input = input_ids + input_shape = input.shape input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] + input = inputs_embeds[:, :, -1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") @@ -1026,7 +1030,7 @@ def forward( past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: - inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale + inputs_embeds = self.embed_tokens(input) * self.embed_scale attention_mask = self._prepare_decoder_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length @@ -1038,7 +1042,7 @@ def forward( encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) # embed positions - positions = self.embed_positions(input_shape, past_key_values_length) + positions = self.embed_positions(input, past_key_values_length) hidden_states = inputs_embeds + positions hidden_states = self.layernorm_embedding(hidden_states) diff --git a/src/transformers/models/mbart/modeling_mbart.py b/src/transformers/models/mbart/modeling_mbart.py index 16ea95bc0aedde..66011fe6a73d0a 100755 --- a/src/transformers/models/mbart/modeling_mbart.py +++ b/src/transformers/models/mbart/modeling_mbart.py @@ -134,12 +134,14 @@ def __init__(self, num_embeddings: int, embedding_dim: int): self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim) - def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0): - """`input_ids_shape` is expected to be [bsz x seqlen].""" - bsz, seq_len = input_ids_shape[:2] + def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0): + """`input_ids' shape is expected to be [bsz x seqlen].""" + + bsz, seq_len = input_ids.shape[:2] positions = torch.arange( past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device - ) + ).expand(bsz, -1) + return super().forward(positions + self.offset) @@ -783,17 +785,18 @@ def forward( if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: - input_shape = input_ids.size() + input = input_ids + input_shape = input.shape input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: - input_shape = inputs_embeds.size()[:-1] + input = inputs_embeds[:, :, -1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale - embed_pos = self.embed_positions(input_shape) + embed_pos = self.embed_positions(input) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) @@ -1013,10 +1016,12 @@ def forward( if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: - input_shape = input_ids.size() + input = input_ids + input_shape = input.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] + input = inputs_embeds[:, :, -1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") @@ -1036,7 +1041,7 @@ def forward( encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) # embed positions - positions = self.embed_positions(input_shape, past_key_values_length) + positions = self.embed_positions(input, past_key_values_length) hidden_states = inputs_embeds + positions hidden_states = self.layernorm_embedding(hidden_states) diff --git a/src/transformers/models/mvp/modeling_mvp.py b/src/transformers/models/mvp/modeling_mvp.py index d3d239c4cff125..37c1a7d837f7ba 100644 --- a/src/transformers/models/mvp/modeling_mvp.py +++ b/src/transformers/models/mvp/modeling_mvp.py @@ -134,12 +134,14 @@ def __init__(self, num_embeddings: int, embedding_dim: int): self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim) - def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0): - """`input_ids_shape` is expected to be [bsz x seqlen].""" - bsz, seq_len = input_ids_shape[:2] + def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0): + """`input_ids' shape is expected to be [bsz x seqlen].""" + + bsz, seq_len = input_ids.shape[:2] positions = torch.arange( past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device - ) + ).expand(bsz, -1) + return super().forward(positions + self.offset) @@ -895,17 +897,19 @@ def forward( if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: - input_shape = input_ids.size() + input = input_ids + input_shape = input.shape input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] + input = inputs_embeds[:, :, -1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale - embed_pos = self.embed_positions(input_shape) + embed_pos = self.embed_positions(input) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) @@ -1144,10 +1148,12 @@ def forward( if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: - input_shape = input_ids.size() + input = input_ids + input_shape = input_ids.shape input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] + input = inputs_embeds[:, :, -1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") @@ -1167,7 +1173,7 @@ def forward( encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) # embed positions - positions = self.embed_positions(input_shape, past_key_values_length) + positions = self.embed_positions(input, past_key_values_length) hidden_states = inputs_embeds + positions hidden_states = self.layernorm_embedding(hidden_states) diff --git a/src/transformers/models/plbart/modeling_plbart.py b/src/transformers/models/plbart/modeling_plbart.py index d03ddf33ebfa7a..d86decb568192e 100755 --- a/src/transformers/models/plbart/modeling_plbart.py +++ b/src/transformers/models/plbart/modeling_plbart.py @@ -131,12 +131,14 @@ def __init__(self, num_embeddings: int, embedding_dim: int): self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim) - def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0): - """`input_ids_shape` is expected to be [bsz x seqlen].""" - bsz, seq_len = input_ids_shape[:2] + def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0): + """`input_ids' shape is expected to be [bsz x seqlen].""" + + bsz, seq_len = input_ids.shape[:2] positions = torch.arange( past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device - ) + ).expand(bsz, -1) + return super().forward(positions + self.offset) @@ -759,17 +761,17 @@ def forward( if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: - input_shape = input_ids.size() - input_ids = input_ids.view(-1, input_shape[-1]) + input = input_ids + input_ids = input_ids.view(-1, input_ids.shape[-1]) elif inputs_embeds is not None: - input_shape = inputs_embeds.size()[:-1] + input = inputs_embeds[:, :, -1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale - embed_pos = self.embed_positions(input_shape) + embed_pos = self.embed_positions(input) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) @@ -987,10 +989,12 @@ def forward( if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: - input_shape = input_ids.size() + input = input_ids + input_shape = input.shape input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] + input = inputs_embeds[:, :, -1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") @@ -998,7 +1002,7 @@ def forward( past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: - inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale + inputs_embeds = self.embed_tokens(input) * self.embed_scale attention_mask = self._prepare_decoder_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length @@ -1010,7 +1014,7 @@ def forward( encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) # embed positions - positions = self.embed_positions(input_shape, past_key_values_length) + positions = self.embed_positions(input, past_key_values_length) hidden_states = inputs_embeds + positions hidden_states = self.layernorm_embedding(hidden_states) diff --git a/src/transformers/models/trocr/modeling_trocr.py b/src/transformers/models/trocr/modeling_trocr.py index a79e5e901d67c4..e25f73c8b7d3b5 100644 --- a/src/transformers/models/trocr/modeling_trocr.py +++ b/src/transformers/models/trocr/modeling_trocr.py @@ -87,12 +87,14 @@ def __init__(self, num_embeddings: int, embedding_dim: int): self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim) - def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0): - """`input_ids_shape` is expected to be [bsz x seqlen].""" - bsz, seq_len = input_ids_shape[:2] + def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0): + """`input_ids' shape is expected to be [bsz x seqlen].""" + + bsz, seq_len = input_ids.shape[:2] positions = torch.arange( past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device - ) + ).expand(bsz, -1) + return super().forward(positions + self.offset) @@ -626,10 +628,11 @@ def forward( if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: - input_shape = input_ids.size() - input_ids = input_ids.view(-1, input_shape[-1]) + input = input_ids + input_ids = input_ids.view(-1, input.shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] + input = inputs_embeds[:, :, -1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") @@ -640,7 +643,7 @@ def forward( inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale if self.config.use_learned_position_embeddings: - embed_pos = self.embed_positions(input_shape, past_key_values_length=past_key_values_length) + embed_pos = self.embed_positions(input, past_key_values_length=past_key_values_length) else: embed_pos = self.embed_positions(input_ids, past_key_values_length=past_key_values_length) @@ -651,6 +654,8 @@ def forward( hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + input_shape = input.shape + attention_mask = self._prepare_decoder_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) From 5d3f03743301acf865be0dda93182c0abaadc49e Mon Sep 17 00:00:00 2001 From: flozi00 Date: Thu, 11 Aug 2022 15:52:27 +0200 Subject: [PATCH 077/539] german docs translation (#18544) * Create _config.py * Create _toctree.yml * Create index.mdx not sure about "du / ihr" oder "sie" * Create quicktour.mdx * Update _toctree.yml * Update build_documentation.yml * Update build_pr_documentation.yml * fix build * Update index.mdx * Update quicktour.mdx * Create installation.mdx * Update _toctree.yml --- .github/workflows/build_documentation.yml | 2 +- .github/workflows/build_pr_documentation.yml | 2 +- docs/source/de/_config.py | 14 + docs/source/de/_toctree.yml | 8 + docs/source/de/index.mdx | 322 ++++++++++++++ docs/source/de/installation.mdx | 246 +++++++++++ docs/source/de/quicktour.mdx | 428 +++++++++++++++++++ 7 files changed, 1020 insertions(+), 2 deletions(-) create mode 100644 docs/source/de/_config.py create mode 100644 docs/source/de/_toctree.yml create mode 100644 docs/source/de/index.mdx create mode 100644 docs/source/de/installation.mdx create mode 100644 docs/source/de/quicktour.mdx diff --git a/.github/workflows/build_documentation.yml b/.github/workflows/build_documentation.yml index fb28fe4f2bdf92..d78fd53a041596 100644 --- a/.github/workflows/build_documentation.yml +++ b/.github/workflows/build_documentation.yml @@ -15,6 +15,6 @@ jobs: commit_sha: ${{ github.sha }} package: transformers notebook_folder: transformers_doc - languages: en es it pt + languages: de en es it pt secrets: token: ${{ secrets.HUGGINGFACE_PUSH }} diff --git a/.github/workflows/build_pr_documentation.yml b/.github/workflows/build_pr_documentation.yml index 8a4dc5a06ec87c..efe9965c3be24a 100644 --- a/.github/workflows/build_pr_documentation.yml +++ b/.github/workflows/build_pr_documentation.yml @@ -14,4 +14,4 @@ jobs: commit_sha: ${{ github.event.pull_request.head.sha }} pr_number: ${{ github.event.number }} package: transformers - languages: en es it pt + languages: de en es it pt diff --git a/docs/source/de/_config.py b/docs/source/de/_config.py new file mode 100644 index 00000000000000..a6d75853f57219 --- /dev/null +++ b/docs/source/de/_config.py @@ -0,0 +1,14 @@ +# docstyle-ignore +INSTALL_CONTENT = """ +# Transformers installation +! pip install transformers datasets +# To install from source instead of the last release, comment the command above and uncomment the following one. +# ! pip install git+https://github.com/huggingface/transformers.git +""" + +notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}] +black_avoid_patterns = { + "{processor_class}": "FakeProcessorClass", + "{model_class}": "FakeModelClass", + "{object_class}": "FakeObjectClass", +} diff --git a/docs/source/de/_toctree.yml b/docs/source/de/_toctree.yml new file mode 100644 index 00000000000000..6097df8d06ae0b --- /dev/null +++ b/docs/source/de/_toctree.yml @@ -0,0 +1,8 @@ +- sections: + - local: index + title: 🤗 Transformers + - local: quicktour + title: Schnellstart + - local: installation + title: Installation + title: Erste Schritte diff --git a/docs/source/de/index.mdx b/docs/source/de/index.mdx new file mode 100644 index 00000000000000..815fd1724f4412 --- /dev/null +++ b/docs/source/de/index.mdx @@ -0,0 +1,322 @@ + + +# 🤗 Transformers + +Maschinelles Lernen auf dem neuesten Stand der Technik für PyTorch, TensorFlow und JAX. + +🤗 Transformers bietet APIs zum einfachen Herunterladen und Trainieren von vortrainierten Modellen auf dem neuesten Stand der Technik. Die Verwendung von vortrainierten Modellen kann Rechenkosten sparen und den CO2-Fußabdruck reduzieren und Zeit sparen, die für das Training eines Modells von Grund auf benötigt wird. Die Modelle können für verschiedene Modalitäten verwendet werden, wie z. B.: + +* 📝 Text: Textklassifizierung, Informationsextrahierung, Beantwortung von Fragen, Zusammenfassung, Übersetzung und Texterstellung in über 100 Sprachen. +* 🖼️ Bilder: Bildklassifizierung, Objekterkennung und Segmentierung. +* 🗣️ Audio: Spracherkennung und Audioklassifizierung. +* 🐙 Multimodal: Beantwortung von Tabellenfragen, optische Zeichenerkennung, Informationsextraktion aus gescannten Dokumenten, Videoklassifizierung und Beantwortung visueller Fragen. + +Unsere Bibliothek unterstützt die nahtlose Integration von drei der beliebtesten Deep-Learning-Bibliotheken: [PyTorch](https://pytorch.org/), [TensorFlow](https://www.tensorflow.org/) und [JAX](https://jax.readthedocs.io/en/latest/). Trainieren Sie Ihr Modell in drei Codezeilen in einem Framework und laden Sie es zur Inferenz mit einem anderen. + +Jede 🤗 Transformers-Architektur ist in einem eigenständigen Python-Modul definiert, so dass sie leicht für Forschung und Experimente angepasst werden kann. + +## Wenn Sie auf der Suche nach individueller Unterstützung durch das Hugging Face-Team sind + + + HuggingFace Expert Acceleration Program +
+ +## Inhalt + +Die Dokumentation ist in fünf Teile gegliedert: + +- **GET STARTED** enthält eine kurze Tour und Installationsanweisungen, um mit 🤗 Transformers loszulegen. +- **TUTORIALS** sind ein hervorragender Ausgangspunkt, wenn Sie neu in unserer Bibliothek sind. Dieser Abschnitt hilft Ihnen, die grundlegenden Fähigkeiten zu erlangen, die Sie benötigen, um mit 🤗 Transformers zu arbeiten. +- **HOW-TO GUIDES** zeigen Ihnen, wie Sie ein bestimmtes Ziel erreichen können, z. B. die Feinabstimmung eines vortrainierten Modells für die Sprachmodellierung oder die Erstellung eines benutzerdefinierten Modellkopfs. +- **KONZEPTUELLE ANLEITUNGEN** bietet weitere Diskussionen und Erklärungen zu den zugrunde liegenden Konzepten und Ideen hinter Modellen, Aufgaben und der Designphilosophie von 🤗 Transformers. +- **API** beschreibt jede Klasse und Funktion, gruppiert in: + + - **MAIN CLASSES** für die Hauptklassen, die die wichtigsten APIs der Bibliothek darstellen. + - MODELLE** für die Klassen und Funktionen, die zu jedem in der Bibliothek implementierten Modell gehören. + - **INTERNAL HELPERS** für die Klassen und Funktionen, die wir intern verwenden. + +Die Bibliothek enthält derzeit JAX-, PyTorch- und TensorFlow-Implementierungen, vortrainierte Modellgewichte, Nutzungsskripte und Konvertierungsprogramme für die folgenden Modelle. + +### Unterstütze Modelle + + + +1. **[ALBERT](model_doc/albert)** (from Google Research and the Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut. +1. **[BART](model_doc/bart)** (from Facebook) released with the paper [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://arxiv.org/abs/1910.13461) by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer. +1. **[BARThez](model_doc/barthez)** (from École polytechnique) released with the paper [BARThez: a Skilled Pretrained French Sequence-to-Sequence Model](https://arxiv.org/abs/2010.12321) by Moussa Kamal Eddine, Antoine J.-P. Tixier, Michalis Vazirgiannis. +1. **[BARTpho](model_doc/bartpho)** (from VinAI Research) released with the paper [BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese](https://arxiv.org/abs/2109.09701) by Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen. +1. **[BEiT](model_doc/beit)** (from Microsoft) released with the paper [BEiT: BERT Pre-Training of Image Transformers](https://arxiv.org/abs/2106.08254) by Hangbo Bao, Li Dong, Furu Wei. +1. **[BERT](model_doc/bert)** (from Google) released with the paper [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. +1. **[BERT For Sequence Generation](model_doc/bert-generation)** (from Google) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. +1. **[BERTweet](model_doc/bertweet)** (from VinAI Research) released with the paper [BERTweet: A pre-trained language model for English Tweets](https://aclanthology.org/2020.emnlp-demos.2/) by Dat Quoc Nguyen, Thanh Vu and Anh Tuan Nguyen. +1. **[BigBird-Pegasus](model_doc/bigbird_pegasus)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed. +1. **[BigBird-RoBERTa](model_doc/big_bird)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed. +1. **[Blenderbot](model_doc/blenderbot)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston. +1. **[BlenderbotSmall](model_doc/blenderbot-small)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston. +1. **[BLOOM](model_doc/bloom)** (from BigScience workshop) released by the [BigSicence Workshop](https://bigscience.huggingface.co/). +1. **[BORT](model_doc/bort)** (from Alexa) released with the paper [Optimal Subarchitecture Extraction For BERT](https://arxiv.org/abs/2010.10499) by Adrian de Wynter and Daniel J. Perry. +1. **[ByT5](model_doc/byt5)** (from Google Research) released with the paper [ByT5: Towards a token-free future with pre-trained byte-to-byte models](https://arxiv.org/abs/2105.13626) by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel. +1. **[CamemBERT](model_doc/camembert)** (from Inria/Facebook/Sorbonne) released with the paper [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot. +1. **[CANINE](model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting. +1. **[CLIP](model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever. +1. **[CodeGen](model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong. +1. **[ConvBERT](model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan. +1. **[ConvNeXT](model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie. +1. **[CPM](model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun. +1. **[CTRL](model_doc/ctrl)** (from Salesforce) released with the paper [CTRL: A Conditional Transformer Language Model for Controllable Generation](https://arxiv.org/abs/1909.05858) by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher. +1. **[CvT](model_doc/cvt)** (from Microsoft) released with the paper [CvT: Introducing Convolutions to Vision Transformers](https://arxiv.org/abs/2103.15808) by Haiping Wu, Bin Xiao, Noel Codella, Mengchen Liu, Xiyang Dai, Lu Yuan, Lei Zhang. +1. **[Data2Vec](model_doc/data2vec)** (from Facebook) released with the paper [Data2Vec: A General Framework for Self-supervised Learning in Speech, Vision and Language](https://arxiv.org/abs/2202.03555) by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, Michael Auli. +1. **[DeBERTa](model_doc/deberta)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. +1. **[DeBERTa-v2](model_doc/deberta-v2)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. +1. **[Decision Transformer](model_doc/decision_transformer)** (from Berkeley/Facebook/Google) released with the paper [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) by Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch. +1. **[DeiT](model_doc/deit)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou. +1. **[DETR](model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko. +1. **[DialoGPT](model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan. +1. **[DistilBERT](model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation) and a German version of DistilBERT. +1. **[DiT](model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei. +1. **[DPR](model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. +1. **[DPT](master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun. +1. **[ELECTRA](model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. +1. **[EncoderDecoder](model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. +1. **[FlauBERT](model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab. +1. **[FLAVA](model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. +1. **[FNet](model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon. +1. **[Funnel Transformer](model_doc/funnel)** (from CMU/Google Brain) released with the paper [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing](https://arxiv.org/abs/2006.03236) by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le. +1. **[GLPN](model_doc/glpn)** (from KAIST) released with the paper [Global-Local Path Networks for Monocular Depth Estimation with Vertical CutDepth](https://arxiv.org/abs/2201.07436) by Doyeon Kim, Woonghyun Ga, Pyungwhan Ahn, Donggyu Joo, Sehwan Chun, Junmo Kim. +1. **[GPT](model_doc/openai-gpt)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever. +1. **[GPT Neo](model_doc/gpt_neo)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy. +1. **[GPT NeoX](model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach +1. **[GPT-2](model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**. +1. **[GPT-J](model_doc/gptj)** (from EleutherAI) released in the repository [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki. +1. **[GroupViT](model_doc/groupvit)** (from UCSD, NVIDIA) released with the paper [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) by Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang. +1. **[Hubert](model_doc/hubert)** (from Facebook) released with the paper [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units](https://arxiv.org/abs/2106.07447) by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed. +1. **[I-BERT](model_doc/ibert)** (from Berkeley) released with the paper [I-BERT: Integer-only BERT Quantization](https://arxiv.org/abs/2101.01321) by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer. +1. **[ImageGPT](model_doc/imagegpt)** (from OpenAI) released with the paper [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt/) by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever. +1. **[LayoutLM](model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou. +1. **[LayoutLMv2](model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou. +1. **[LayoutLMv3](model_doc/layoutlmv3)** (from Microsoft Research Asia) released with the paper [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) by Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei. +1. **[LayoutXLM](model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei. +1. **[LED](model_doc/led)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan. +1. **[LeViT](model_doc/levit)** (from Meta AI) released with the paper [LeViT: A Vision Transformer in ConvNet's Clothing for Faster Inference](https://arxiv.org/abs/2104.01136) by Ben Graham, Alaaeldin El-Nouby, Hugo Touvron, Pierre Stock, Armand Joulin, Hervé Jégou, Matthijs Douze. +1. **[Longformer](model_doc/longformer)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan. +1. **[LongT5](model_doc/longt5)** (from Google AI) released with the paper [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung, Yinfei Yang. +1. **[LUKE](model_doc/luke)** (from Studio Ousia) released with the paper [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto. +1. **[LXMERT](model_doc/lxmert)** (from UNC Chapel Hill) released with the paper [LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal. +1. **[M-CTC-T](model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert. +1. **[M2M100](model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin. +1. **[MarianMT](model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team. +1. **[MaskFormer](model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov. +1. **[mBART](model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer. +1. **[mBART-50](model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan. +1. **[Megatron-BERT](model_doc/megatron-bert)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro. +1. **[Megatron-GPT2](model_doc/megatron_gpt2)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro. +1. **[mLUKE](model_doc/mluke)** (from Studio Ousia) released with the paper [mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models](https://arxiv.org/abs/2110.08151) by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka. +1. **[MobileBERT](model_doc/mobilebert)** (from CMU/Google Brain) released with the paper [MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices](https://arxiv.org/abs/2004.02984) by Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou. +1. **[MobileViT](model_doc/mobilevit)** (from Apple) released with the paper [MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer](https://arxiv.org/abs/2110.02178) by Sachin Mehta and Mohammad Rastegari. +1. **[MPNet](model_doc/mpnet)** (from Microsoft Research) released with the paper [MPNet: Masked and Permuted Pre-training for Language Understanding](https://arxiv.org/abs/2004.09297) by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu. +1. **[MT5](model_doc/mt5)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel. +1. **[MVP](model_doc/mvp)** (from RUC AI Box) released with the paper [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen. +1. **[Nezha](model_doc/nezha)** (from Huawei Noah’s Ark Lab) released with the paper [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu. +1. **[NLLB](model_doc/nllb)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team. +1. **[Nyströmformer](model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh. +1. **[OPT](master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al. +1. **[OWL-ViT](model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. +1. **[Pegasus](model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu. +1. **[Perceiver IO](model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira. +1. **[PhoBERT](model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen. +1. **[PLBart](model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang. +1. **[PoolFormer](model_doc/poolformer)** (from Sea AI Labs) released with the paper [MetaFormer is Actually What You Need for Vision](https://arxiv.org/abs/2111.11418) by Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, Shuicheng. +1. **[ProphetNet](model_doc/prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou. +1. **[QDQBert](model_doc/qdqbert)** (from NVIDIA) released with the paper [Integer Quantization for Deep Learning Inference: Principles and Empirical Evaluation](https://arxiv.org/abs/2004.09602) by Hao Wu, Patrick Judd, Xiaojie Zhang, Mikhail Isaev and Paulius Micikevicius. +1. **[RAG](model_doc/rag)** (from Facebook) released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](https://arxiv.org/abs/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Küttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, Sebastian Riedel, Douwe Kiela. +1. **[REALM](model_doc/realm.html)** (from Google Research) released with the paper [REALM: Retrieval-Augmented Language Model Pre-Training](https://arxiv.org/abs/2002.08909) by Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei Chang. +1. **[Reformer](model_doc/reformer)** (from Google Research) released with the paper [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya. +1. **[RegNet](model_doc/regnet)** (from META Platforms) released with the paper [Designing Network Design Space](https://arxiv.org/abs/2003.13678) by Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, Piotr Dollár. +1. **[RemBERT](model_doc/rembert)** (from Google Research) released with the paper [Rethinking embedding coupling in pre-trained language models](https://arxiv.org/abs/2010.12821) by Hyung Won Chung, Thibault Févry, Henry Tsai, M. Johnson, Sebastian Ruder. +1. **[ResNet](model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. +1. **[RoBERTa](model_doc/roberta)** (from Facebook), released together with the paper [RoBERTa: A Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov. +1. **[RoFormer](model_doc/roformer)** (from ZhuiyiTechnology), released together with the paper [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/abs/2104.09864) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu. +1. **[SegFormer](model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo. +1. **[SEW](model_doc/sew)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi. +1. **[SEW-D](model_doc/sew_d)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi. +1. **[SpeechToTextTransformer](model_doc/speech_to_text)** (from Facebook), released together with the paper [fairseq S2T: Fast Speech-to-Text Modeling with fairseq](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino. +1. **[SpeechToTextTransformer2](model_doc/speech_to_text_2)** (from Facebook), released together with the paper [Large-Scale Self- and Semi-Supervised Learning for Speech Translation](https://arxiv.org/abs/2104.06678) by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau. +1. **[Splinter](model_doc/splinter)** (from Tel Aviv University), released together with the paper [Few-Shot Question Answering by Pretraining Span Selection](https://arxiv.org/abs/2101.00438) by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy. +1. **[SqueezeBERT](model_doc/squeezebert)** (from Berkeley) released with the paper [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer. +1. **[Swin Transformer](model_doc/swin)** (from Microsoft) released with the paper [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo. +1. **[Swin Transformer V2](model_doc/swinv2)** (from Microsoft) released with the paper [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883) by Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo. +1. **[T5](model_doc/t5)** (from Google AI) released with the paper [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu. +1. **[T5v1.1](model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu. +1. **[TAPAS](model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos. +1. **[TAPEX](model_doc/tapex)** (from Microsoft Research) released with the paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou. +1. **[Trajectory Transformer](model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine +1. **[Transformer-XL](model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov. +1. **[TrOCR](model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei. +1. **[UL2](model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler +1. **[UniSpeech](model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang. +1. **[UniSpeechSat](model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu. +1. **[VAN](model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/abs/2202.09741) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu. +1. **[VideoMAE](model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang. +1. **[ViLT](model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim. +1. **[Vision Transformer (ViT)](model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby. +1. **[VisualBERT](model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang. +1. **[ViTMAE](model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick. +1. **[Wav2Vec2](model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli. +1. **[Wav2Vec2-Conformer](model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino. +1. **[Wav2Vec2Phoneme](model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli. +1. **[WavLM](model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei. +1. **[XGLM](model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li. +1. **[XLM](model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau. +1. **[XLM-ProphetNet](model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou. +1. **[XLM-RoBERTa](model_doc/xlm-roberta)** (from Facebook AI), released together with the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov. +1. **[XLM-RoBERTa-XL](model_doc/xlm-roberta-xl)** (from Facebook AI), released together with the paper [Larger-Scale Transformers for Multilingual Masked Language Modeling](https://arxiv.org/abs/2105.00572) by Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau. +1. **[XLNet](model_doc/xlnet)** (from Google/CMU) released with the paper [​XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le. +1. **[XLS-R](model_doc/xls_r)** (from Facebook AI) released with the paper [XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale](https://arxiv.org/abs/2111.09296) by Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli. +1. **[XLSR-Wav2Vec2](model_doc/xlsr_wav2vec2)** (from Facebook AI) released with the paper [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://arxiv.org/abs/2006.13979) by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli. +1. **[YOLOS](model_doc/yolos)** (from Huazhong University of Science & Technology) released with the paper [You Only Look at One Sequence: Rethinking Transformer in Vision through Object Detection](https://arxiv.org/abs/2106.00666) by Yuxin Fang, Bencheng Liao, Xinggang Wang, Jiemin Fang, Jiyang Qi, Rui Wu, Jianwei Niu, Wenyu Liu. +1. **[YOSO](model_doc/yoso)** (from the University of Wisconsin - Madison) released with the paper [You Only Sample (Almost) Once: Linear Cost Self-Attention Via Bernoulli Sampling](https://arxiv.org/abs/2111.09714) by Zhanpeng Zeng, Yunyang Xiong, Sathya N. Ravi, Shailesh Acharya, Glenn Fung, Vikas Singh. + + +### Unterstützte Frameworks + +Die folgende Tabelle zeigt die derzeitige Unterstützung in der Bibliothek für jedes dieser Modelle, unabhängig davon, ob sie einen Python +Tokenizer haben (als "langsam" bezeichnet), ein "schneller" Tokenizer, der von der 🤗 Tokenizers Bibliothek unterstützt wird, ob sie Unterstützung in Jax (via +Flax), PyTorch, und/oder TensorFlow haben. + + + +| Model | Tokenizer slow | Tokenizer fast | PyTorch support | TensorFlow support | Flax Support | +|:---------------------------:|:--------------:|:--------------:|:---------------:|:------------------:|:------------:| +| ALBERT | ✅ | ✅ | ✅ | ✅ | ✅ | +| BART | ✅ | ✅ | ✅ | ✅ | ✅ | +| BEiT | ❌ | ❌ | ✅ | ❌ | ✅ | +| BERT | ✅ | ✅ | ✅ | ✅ | ✅ | +| Bert Generation | ✅ | ❌ | ✅ | ❌ | ❌ | +| BigBird | ✅ | ✅ | ✅ | ❌ | ✅ | +| BigBird-Pegasus | ❌ | ❌ | ✅ | ❌ | ❌ | +| Blenderbot | ✅ | ✅ | ✅ | ✅ | ✅ | +| BlenderbotSmall | ✅ | ✅ | ✅ | ✅ | ✅ | +| BLOOM | ❌ | ✅ | ✅ | ❌ | ❌ | +| CamemBERT | ✅ | ✅ | ✅ | ✅ | ❌ | +| CANINE | ✅ | ❌ | ✅ | ❌ | ❌ | +| CLIP | ✅ | ✅ | ✅ | ✅ | ✅ | +| CodeGen | ✅ | ✅ | ✅ | ❌ | ❌ | +| ConvBERT | ✅ | ✅ | ✅ | ✅ | ❌ | +| ConvNeXT | ❌ | ❌ | ✅ | ✅ | ❌ | +| CTRL | ✅ | ❌ | ✅ | ✅ | ❌ | +| CvT | ❌ | ❌ | ✅ | ❌ | ❌ | +| Data2VecAudio | ❌ | ❌ | ✅ | ❌ | ❌ | +| Data2VecText | ❌ | ❌ | ✅ | ❌ | ❌ | +| Data2VecVision | ❌ | ❌ | ✅ | ✅ | ❌ | +| DeBERTa | ✅ | ✅ | ✅ | ✅ | ❌ | +| DeBERTa-v2 | ✅ | ✅ | ✅ | ✅ | ❌ | +| Decision Transformer | ❌ | ❌ | ✅ | ❌ | ❌ | +| DeiT | ❌ | ❌ | ✅ | ✅ | ❌ | +| DETR | ❌ | ❌ | ✅ | ❌ | ❌ | +| DistilBERT | ✅ | ✅ | ✅ | ✅ | ✅ | +| DPR | ✅ | ✅ | ✅ | ✅ | ❌ | +| DPT | ❌ | ❌ | ✅ | ❌ | ❌ | +| ELECTRA | ✅ | ✅ | ✅ | ✅ | ✅ | +| Encoder decoder | ❌ | ❌ | ✅ | ✅ | ✅ | +| FairSeq Machine-Translation | ✅ | ❌ | ✅ | ❌ | ❌ | +| FlauBERT | ✅ | ❌ | ✅ | ✅ | ❌ | +| FLAVA | ❌ | ❌ | ✅ | ❌ | ❌ | +| FNet | ✅ | ✅ | ✅ | ❌ | ❌ | +| Funnel Transformer | ✅ | ✅ | ✅ | ✅ | ❌ | +| GLPN | ❌ | ❌ | ✅ | ❌ | ❌ | +| GPT Neo | ❌ | ❌ | ✅ | ❌ | ✅ | +| GPT NeoX | ❌ | ✅ | ✅ | ❌ | ❌ | +| GPT-J | ❌ | ❌ | ✅ | ✅ | ✅ | +| GroupViT | ❌ | ❌ | ✅ | ❌ | ❌ | +| Hubert | ❌ | ❌ | ✅ | ✅ | ❌ | +| I-BERT | ❌ | ❌ | ✅ | ❌ | ❌ | +| ImageGPT | ❌ | ❌ | ✅ | ❌ | ❌ | +| LayoutLM | ✅ | ✅ | ✅ | ✅ | ❌ | +| LayoutLMv2 | ✅ | ✅ | ✅ | ❌ | ❌ | +| LayoutLMv3 | ✅ | ✅ | ✅ | ❌ | ❌ | +| LED | ✅ | ✅ | ✅ | ✅ | ❌ | +| LeViT | ❌ | ❌ | ✅ | ❌ | ❌ | +| Longformer | ✅ | ✅ | ✅ | ✅ | ❌ | +| LongT5 | ❌ | ❌ | ✅ | ❌ | ✅ | +| LUKE | ✅ | ❌ | ✅ | ❌ | ❌ | +| LXMERT | ✅ | ✅ | ✅ | ✅ | ❌ | +| M-CTC-T | ❌ | ❌ | ✅ | ❌ | ❌ | +| M2M100 | ✅ | ❌ | ✅ | ❌ | ❌ | +| Marian | ✅ | ❌ | ✅ | ✅ | ✅ | +| MaskFormer | ❌ | ❌ | ✅ | ❌ | ❌ | +| mBART | ✅ | ✅ | ✅ | ✅ | ✅ | +| Megatron-BERT | ❌ | ❌ | ✅ | ❌ | ❌ | +| MobileBERT | ✅ | ✅ | ✅ | ✅ | ❌ | +| MobileViT | ❌ | ❌ | ✅ | ❌ | ❌ | +| MPNet | ✅ | ✅ | ✅ | ✅ | ❌ | +| MT5 | ✅ | ✅ | ✅ | ✅ | ✅ | +| MVP | ✅ | ✅ | ✅ | ❌ | ❌ | +| Nezha | ❌ | ❌ | ✅ | ❌ | ❌ | +| Nyströmformer | ❌ | ❌ | ✅ | ❌ | ❌ | +| OpenAI GPT | ✅ | ✅ | ✅ | ✅ | ❌ | +| OpenAI GPT-2 | ✅ | ✅ | ✅ | ✅ | ✅ | +| OPT | ❌ | ❌ | ✅ | ✅ | ✅ | +| OWL-ViT | ❌ | ❌ | ✅ | ❌ | ❌ | +| Pegasus | ✅ | ✅ | ✅ | ✅ | ✅ | +| Perceiver | ✅ | ❌ | ✅ | ❌ | ❌ | +| PLBart | ✅ | ❌ | ✅ | ❌ | ❌ | +| PoolFormer | ❌ | ❌ | ✅ | ❌ | ❌ | +| ProphetNet | ✅ | ❌ | ✅ | ❌ | ❌ | +| QDQBert | ❌ | ❌ | ✅ | ❌ | ❌ | +| RAG | ✅ | ❌ | ✅ | ✅ | ❌ | +| REALM | ✅ | ✅ | ✅ | ❌ | ❌ | +| Reformer | ✅ | ✅ | ✅ | ❌ | ❌ | +| RegNet | ❌ | ❌ | ✅ | ✅ | ❌ | +| RemBERT | ✅ | ✅ | ✅ | ✅ | ❌ | +| ResNet | ❌ | ❌ | ✅ | ✅ | ❌ | +| RetriBERT | ✅ | ✅ | ✅ | ❌ | ❌ | +| RoBERTa | ✅ | ✅ | ✅ | ✅ | ✅ | +| RoFormer | ✅ | ✅ | ✅ | ✅ | ✅ | +| SegFormer | ❌ | ❌ | ✅ | ✅ | ❌ | +| SEW | ❌ | ❌ | ✅ | ❌ | ❌ | +| SEW-D | ❌ | ❌ | ✅ | ❌ | ❌ | +| Speech Encoder decoder | ❌ | ❌ | ✅ | ❌ | ✅ | +| Speech2Text | ✅ | ❌ | ✅ | ✅ | ❌ | +| Speech2Text2 | ✅ | ❌ | ❌ | ❌ | ❌ | +| Splinter | ✅ | ✅ | ✅ | ❌ | ❌ | +| SqueezeBERT | ✅ | ✅ | ✅ | ❌ | ❌ | +| Swin Transformer | ❌ | ❌ | ✅ | ✅ | ❌ | +| Swin Transformer V2 | ❌ | ❌ | ✅ | ❌ | ❌ | +| T5 | ✅ | ✅ | ✅ | ✅ | ✅ | +| TAPAS | ✅ | ❌ | ✅ | ✅ | ❌ | +| Trajectory Transformer | ❌ | ❌ | ✅ | ❌ | ❌ | +| Transformer-XL | ✅ | ❌ | ✅ | ✅ | ❌ | +| TrOCR | ❌ | ❌ | ✅ | ❌ | ❌ | +| UniSpeech | ❌ | ❌ | ✅ | ❌ | ❌ | +| UniSpeechSat | ❌ | ❌ | ✅ | ❌ | ❌ | +| VAN | ❌ | ❌ | ✅ | ❌ | ❌ | +| VideoMAE | ❌ | ❌ | ✅ | ❌ | ❌ | +| ViLT | ❌ | ❌ | ✅ | ❌ | ❌ | +| Vision Encoder decoder | ❌ | ❌ | ✅ | ✅ | ✅ | +| VisionTextDualEncoder | ❌ | ❌ | ✅ | ❌ | ✅ | +| VisualBERT | ❌ | ❌ | ✅ | ❌ | ❌ | +| ViT | ❌ | ❌ | ✅ | ✅ | ✅ | +| ViTMAE | ❌ | ❌ | ✅ | ✅ | ❌ | +| Wav2Vec2 | ✅ | ❌ | ✅ | ✅ | ✅ | +| Wav2Vec2-Conformer | ❌ | ❌ | ✅ | ❌ | ❌ | +| WavLM | ❌ | ❌ | ✅ | ❌ | ❌ | +| XGLM | ✅ | ✅ | ✅ | ❌ | ✅ | +| XLM | ✅ | ❌ | ✅ | ✅ | ❌ | +| XLM-ProphetNet | ✅ | ❌ | ✅ | ❌ | ❌ | +| XLM-RoBERTa | ✅ | ✅ | ✅ | ✅ | ✅ | +| XLM-RoBERTa-XL | ❌ | ❌ | ✅ | ❌ | ❌ | +| XLNet | ✅ | ✅ | ✅ | ✅ | ❌ | +| YOLOS | ❌ | ❌ | ✅ | ❌ | ❌ | +| YOSO | ❌ | ❌ | ✅ | ❌ | ❌ | + + diff --git a/docs/source/de/installation.mdx b/docs/source/de/installation.mdx new file mode 100644 index 00000000000000..3103830ee7fd8a --- /dev/null +++ b/docs/source/de/installation.mdx @@ -0,0 +1,246 @@ + + +# Installation + +Installieren Sie 🤗 Transformers für die Deep-Learning-Bibliothek, mit der Sie arbeiten, richten Sie Ihren Cache ein und konfigurieren Sie 🤗 Transformers optional für den Offline-Betrieb. + +🤗 Transformers wurde unter Python 3.6+, PyTorch 1.1.0+, TensorFlow 2.0+, und Flax getestet. Folgen Sie den Installationsanweisungen unten für die von Ihnen verwendete Deep-Learning-Bibliothek: + +* [PyTorch](https://pytorch.org/get-started/locally/) installation instructions. +* [TensorFlow 2.0](https://www.tensorflow.org/install/pip) installation instructions. +* [Flax](https://flax.readthedocs.io/en/latest/) installation instructions. + +## Installation mit pip + +Sie sollten 🤗 Transformers in einer [virtuellen Umgebung](https://docs.python.org/3/library/venv.html) installieren. Wenn Sie mit virtuellen Python-Umgebungen nicht vertraut sind, werfen Sie einen Blick auf diese [Anleitung](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). Eine virtuelle Umgebung macht es einfacher, verschiedene Projekte zu verwalten und Kompatibilitätsprobleme zwischen Abhängigkeiten zu vermeiden. + +Beginnen wir mit der Erstellung einer virtuellen Umgebung in Ihrem Projektverzeichnis: + + +```bash +python -m venv .env +``` + +Aktivieren wir die virtuelle Umgebung. Unter Linux und MacOs: + +```bash +source .env/bin/activate +``` +Aktivieren wir die virtuelle Umgebung unter Windows + +```bash +.env/Scripts/activate +``` + +Jetzt können wir die 🤗 Transformers mit dem folgenden Befehl installieren: + +```bash +pip install transformers +``` + +Bei reiner CPU-Unterstützung können wir 🤗 Transformers und eine Deep-Learning-Bibliothek bequem in einer Zeile installieren. Installieren wir zum Beispiel 🤗 Transformers und PyTorch mit: + +```bash +pip install transformers[torch] +``` + +🤗 Transformers und TensorFlow 2.0: + +```bash +pip install transformers[tf-cpu] +``` + +🤗 Transformers und Flax: + +```bash +pip install transformers[flax] +``` + +Überprüfen wir abschließend, ob 🤗 Transformers ordnungsgemäß installiert wurde, indem wir den folgenden Befehl ausführen. Es wird ein vortrainiertes Modell heruntergeladen: + +```bash +python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('we love you'))" +``` + +Dann wird die Kategorie und die Wahrscheinlichkeit ausgegeben: + +```bash +[{'label': 'POSITIVE', 'score': 0.9998704791069031}] +``` + +## Installation aus dem Code + +Installieren wir 🤗 Transformers aus dem Quellcode mit dem folgenden Befehl: + +```bash +pip install git+https://github.com/huggingface/transformers +``` + +Dieser Befehl installiert die aktuelle `main` Version und nicht die neueste `stable` Version. Die `main`-Version ist nützlich, um mit den neuesten Entwicklungen Schritt zu halten. Zum Beispiel, wenn ein Fehler seit der letzten offiziellen Version behoben wurde, aber eine neue Version noch nicht veröffentlicht wurde. Das bedeutet jedoch, dass die "Hauptversion" nicht immer stabil ist. Wir bemühen uns, die Hauptversion einsatzbereit zu halten, und die meisten Probleme werden normalerweise innerhalb weniger Stunden oder eines Tages behoben. Wenn Sie auf ein Problem stoßen, öffnen Sie bitte ein [Issue] (https://github.com/huggingface/transformers/issues), damit wir es noch schneller beheben können! + +Überprüfen wir, ob 🤗 Transformers richtig installiert wurde, indem Sie den folgenden Befehl ausführen: + + +```bash +python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('I love you'))" +``` + +## Editierbare Installation + +Sie benötigen eine bearbeitbare Installation, wenn Sie: + +* die "Haupt"-Version des Quellcodes verwenden möchten. +* Zu 🤗 Transformers beitragen und Änderungen am Code testen wollen. + +Klonen Sie das Repository und installieren 🤗 Transformers mit den folgenden Befehlen: + +```bash +git clone https://github.com/huggingface/transformers.git +cd transformers +pip install -e . +``` + +Diese Befehle verknüpfen den Ordner, in den Sie das Repository geklont haben, mit den Pfaden Ihrer Python-Bibliotheken. Python wird nun in dem Ordner suchen, in den Sie geklont haben, zusätzlich zu den normalen Bibliothekspfaden. Wenn zum Beispiel Ihre Python-Pakete normalerweise in `~/anaconda3/envs/main/lib/python3.7/site-packages/` installiert sind, wird Python auch den Ordner durchsuchen, in den Sie geklont haben: `~/transformers/`. + + + + +Sie müssen den Ordner `transformers` behalten, wenn Sie die Bibliothek weiter verwenden wollen. + + + +Jetzt können Sie Ihren Klon mit dem folgenden Befehl ganz einfach auf die neueste Version von 🤗 Transformers aktualisieren: + + +```bash +cd ~/transformers/ +git pull +``` + +Ihre Python-Umgebung wird beim nächsten Ausführen die `main`-Version von 🤗 Transformers finden. + +## Installation mit conda + +Installation von dem conda Kanal `huggingface`: + +```bash +conda install -c huggingface transformers +``` + +## Cache Einrichtung + +Vorgefertigte Modelle werden heruntergeladen und lokal zwischengespeichert unter: `~/.cache/huggingface/hub`. Dies ist das Standardverzeichnis, das durch die Shell-Umgebungsvariable "TRANSFORMERS_CACHE" vorgegeben ist. Unter Windows wird das Standardverzeichnis durch `C:\Benutzer\Benutzername\.cache\huggingface\hub` angegeben. Sie können die unten aufgeführten Shell-Umgebungsvariablen - in der Reihenfolge ihrer Priorität - ändern, um ein anderes Cache-Verzeichnis anzugeben: + +1. Shell-Umgebungsvariable (Standard): `HUGGINGFACE_HUB_CACHE` oder `TRANSFORMERS_CACHE`. +2. Shell-Umgebungsvariable: `HF_HOME`. +3. Shell-Umgebungsvariable: `XDG_CACHE_HOME` + `/huggingface`. + + + + +Transformers verwendet die Shell-Umgebungsvariablen `PYTORCH_TRANSFORMERS_CACHE` oder `PYTORCH_PRETRAINED_BERT_CACHE`, wenn Sie von einer früheren Iteration dieser Bibliothek kommen und diese Umgebungsvariablen gesetzt haben, sofern Sie nicht die Shell-Umgebungsvariable `TRANSFORMERS_CACHE` angeben. + + + +## Offline Modus + +Transformers ist in der Lage, in einer Firewall- oder Offline-Umgebung zu laufen, indem es nur lokale Dateien verwendet. Setzen Sie die Umgebungsvariable `TRANSFORMERS_OFFLINE=1`, um dieses Verhalten zu aktivieren. + + + +Fügen sie [🤗 Datasets](https://huggingface.co/docs/datasets/) zu Ihrem Offline-Trainingsworkflow hinzufügen, indem Sie die Umgebungsvariable `HF_DATASETS_OFFLINE=1` setzen. + + + +So würden Sie beispielsweise ein Programm in einem normalen Netzwerk mit einer Firewall für externe Instanzen mit dem folgenden Befehl ausführen: + +```bash +python examples/pytorch/translation/run_translation.py --model_name_or_path t5-small --dataset_name wmt16 --dataset_config ro-en ... +``` + +Führen Sie das gleiche Programm in einer Offline-Instanz mit aus: + +```bash +HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \ +python examples/pytorch/translation/run_translation.py --model_name_or_path t5-small --dataset_name wmt16 --dataset_config ro-en ... +``` + +Das Skript sollte nun laufen, ohne sich aufzuhängen oder eine Zeitüberschreitung abzuwarten, da es weiß, dass es nur nach lokalen Dateien suchen soll. + + +### Abrufen von Modellen und Tokenizern zur Offline-Verwendung + +Eine andere Möglichkeit, 🤗 Transformers offline zu verwenden, besteht darin, die Dateien im Voraus herunterzuladen und dann auf ihren lokalen Pfad zu verweisen, wenn Sie sie offline verwenden müssen. Es gibt drei Möglichkeiten, dies zu tun: + +* Laden Sie eine Datei über die Benutzeroberfläche des [Model Hub](https://huggingface.co/models) herunter, indem Sie auf das ↓-Symbol klicken. + + ![download-icon](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/download-icon.png) + +* Verwenden Sie den [PreTrainedModel.from_pretrained] und [PreTrainedModel.save_pretrained] Workflow: + + 1. Laden Sie Ihre Dateien im Voraus mit [`PreTrainedModel.from_pretrained`] herunter: + + ```py + >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM + + >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/T0_3B") + >>> model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0_3B") + ``` + + 2. Speichern Sie Ihre Dateien in einem bestimmten Verzeichnis mit [`PreTrainedModel.save_pretrained`]: + + ```py + >>> tokenizer.save_pretrained("./your/path/bigscience_t0") + >>> model.save_pretrained("./your/path/bigscience_t0") + ``` + + 3. Wenn Sie nun offline sind, laden Sie Ihre Dateien mit [`PreTrainedModel.from_pretrained`] aus dem bestimmten Verzeichnis: + + ```py + >>> tokenizer = AutoTokenizer.from_pretrained("./your/path/bigscience_t0") + >>> model = AutoModel.from_pretrained("./your/path/bigscience_t0") + ``` + +* Programmatisches Herunterladen von Dateien mit der [huggingface_hub](https://github.com/huggingface/huggingface_hub/tree/main/src/huggingface_hub) Bibliothek: + + 1. Installieren Sie die "huggingface_hub"-Bibliothek in Ihrer virtuellen Umgebung: + + ```bash + python -m pip install huggingface_hub + ``` + + 2. Verwenden Sie die Funktion [`hf_hub_download`](https://huggingface.co/docs/hub/adding-a-library#download-files-from-the-hub), um eine Datei in einen bestimmten Pfad herunterzuladen. Der folgende Befehl lädt zum Beispiel die Datei "config.json" aus dem Modell [T0](https://huggingface.co/bigscience/T0_3B) in den gewünschten Pfad herunter: + + ```py + >>> from huggingface_hub import hf_hub_download + + >>> hf_hub_download(repo_id="bigscience/T0_3B", filename="config.json", cache_dir="./your/path/bigscience_t0") + ``` + +Sobald Ihre Datei heruntergeladen und lokal zwischengespeichert ist, geben Sie den lokalen Pfad an, um sie zu laden und zu verwenden: + +```py +>>> from transformers import AutoConfig + +>>> config = AutoConfig.from_pretrained("./your/path/bigscience_t0/config.json") +``` + + + +Weitere Informationen zum Herunterladen von Dateien, die auf dem Hub gespeichert sind, finden Sie im Abschnitt [Wie man Dateien vom Hub herunterlädt] (https://huggingface.co/docs/hub/how-to-downstream). + + diff --git a/docs/source/de/quicktour.mdx b/docs/source/de/quicktour.mdx new file mode 100644 index 00000000000000..4c668bf419b134 --- /dev/null +++ b/docs/source/de/quicktour.mdx @@ -0,0 +1,428 @@ + + +# Schnellstart + +[[open-in-colab]] + +Mit 🤗 Transformers können Sie sofort loslegen! Verwenden Sie die [`pipeline`] für schnelle Inferenz und laden Sie schnell ein vortrainiertes Modell und einen Tokenizer mit einer [AutoClass](./model_doc/auto), um Ihre Text-, Bild- oder Audioaufgabe zu lösen. + + + +Alle in der Dokumentation vorgestellten Codebeispiele haben oben links einen Umschalter für PyTorch und TensorFlow. Wenn +nicht, wird erwartet, dass der Code für beide Backends ohne Änderungen funktioniert. + + + +## Pipeline + +[`pipeline`] ist der einfachste Weg, ein vortrainiertes Modell für eine bestimmte Aufgabe zu verwenden. + + + +Die [`pipeline`] unterstützt viele gängige Aufgaben: + +**Text**: +* Stimmungsanalyse: Klassifizierung der Polarität eines gegebenen Textes. +* Textgenerierung (auf Englisch): Generierung von Text aus einer gegebenen Eingabe. +* Name-Entity-Recognition (NER): Kennzeichnung jedes Worts mit der Entität, die es repräsentiert (Person, Datum, Ort usw.). +* Beantwortung von Fragen: Extrahieren der Antwort aus dem Kontext, wenn ein gewisser Kontext und eine Frage gegeben sind. +* Fill-mask: Ausfüllen von Lücken in einem Text mit maskierten Wörtern. +* Zusammenfassung: Erstellung einer Zusammenfassung einer langen Text- oder Dokumentensequenz. +* Übersetzung: Übersetzen eines Textes in eine andere Sprache. +* Merkmalsextraktion: Erstellen einer Tensordarstellung des Textes. + +**Bild**: +* Bildklassifizierung: Klassifizierung eines Bildes. +* Bildsegmentierung: Klassifizierung jedes Pixels in einem Bild. +* Objekterkennung: Erkennen von Objekten innerhalb eines Bildes. + +**Audio**: +* Audioklassifizierung: Zuweisung eines Labels zu einem bestimmten Audiosegment. +* Automatische Spracherkennung (ASR): Transkription von Audiodaten in Text. + + + +Für mehr Details über die [`pipeline`] und assoziierte Aufgaben, schauen Sie in die Dokumentation [hier](./main_classes/pipelines). + + + +### Verwendung der Pipeline + +Im folgenden Beispiel werden Sie die [`pipeline`] für die Stimmungsanalyse verwenden. + +Installieren Sie die folgenden Abhängigkeiten, falls Sie dies nicht bereits getan haben: + + + +```bash +pip install torch +``` + + +```bash +pip install tensorflow +``` + + + +Importieren sie die [`pipeline`] und spezifizieren sie die Aufgabe, welche sie lösen möchten: + +```py +>>> from transformers import pipeline + +>>> classifier = pipeline("sentiment-analysis") +``` + +Die Pipeline lädt ein standardmäßiges [vortrainiertes Modell] (https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) und einen Tokenizer für die Stimmungs-Analyse herunter und speichert sie. Jetzt können Sie den "Klassifikator" auf Ihren Zieltext anwenden: + +```py +>>> classifier("We are very happy to show you the 🤗 Transformers library.") +[{'label': 'POSITIVE', 'score': 0.9998}] +``` + +For more than one sentence, pass a list of sentences to the [`pipeline`] which returns a list of dictionaries: + +```py +>>> results = classifier(["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."]) +>>> for result in results: +... print(f"label: {result['label']}, with score: {round(result['score'], 4)}") +label: POSITIVE, with score: 0.9998 +label: NEGATIVE, with score: 0.5309 +``` + +Die [`pipeline`] kann auch über einen ganzen Datensatz iterieren. Starten wir mit der Installation der [🤗 Datasets](https://huggingface.co/docs/datasets/) Bibliothek: + +```bash +pip install datasets +``` + +Erstellen wir eine [`pipeline`] mit der Aufgabe die wir lösen und dem Modell welches wir nutzen möchten. + +```py +>>> import torch +>>> from transformers import pipeline + +>>> speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h") +``` + +Als nächstes laden wir den Datensatz (siehe 🤗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart.html) für mehr Details) welches wir nutzen möchten. Zum Beispiel laden wir den [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) Datensatz: + +```py +>>> from datasets import load_dataset, Audio + +>>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") # doctest: +IGNORE_RESULT +``` + +Wir müssen sicherstellen, dass die Abtastrate des Datensatzes der Abtastrate entspricht, mit der `facebook/wav2vec2-base-960h` trainiert wurde. + +```py +>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=speech_recognizer.feature_extractor.sampling_rate)) +``` + +Audiodateien werden automatisch geladen und neu abgetastet, wenn die Spalte "audio" aufgerufen wird. +Extrahieren wir die rohen Wellenform-Arrays der ersten 4 Beispiele und übergeben wir sie als Liste an die Pipeline: + +```py +>>> result = speech_recognizer(dataset[:4]["audio"]) +>>> print([d["text"] for d in result]) +['I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT', "FODING HOW I'D SET UP A JOIN TO HET WITH MY WIFE AND WHERE THE AP MIGHT BE", "I I'D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I'M NOT SEEING THE OPTION TO DO IT ON THE AP SO I CALLED IN TO GET SOME HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AND I'M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS", 'HOW DO I THURN A JOIN A COUNT'] +``` + +Bei einem größeren Datensatz mit vielen Eingaben (wie bei Sprache oder Bildverarbeitung) sollten Sie einen Generator anstelle einer Liste übergeben, der alle Eingaben in den Speicher lädt. Weitere Informationen finden Sie in der [Pipeline-Dokumentation](./main_classes/pipelines). + +### Ein anderes Modell und einen anderen Tokenizer in der Pipeline verwenden + +Die [`pipeline`] kann jedes Modell aus dem [Model Hub] (https://huggingface.co/models) verwenden, wodurch es einfach ist, die [`pipeline`] für andere Anwendungsfälle anzupassen. Wenn Sie beispielsweise ein Modell wünschen, das französischen Text verarbeiten kann, verwenden Sie die Tags im Model Hub, um nach einem geeigneten Modell zu filtern. Das oberste gefilterte Ergebnis liefert ein mehrsprachiges [BERT-Modell](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment), das auf die Stimmungsanalyse abgestimmt ist. Großartig, verwenden wir dieses Modell! + +```py +>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" +``` + + + +Use the [`AutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the pretrained model and it's associated tokenizer (more on an `AutoClass` below): + +```py +>>> from transformers import AutoTokenizer, AutoModelForSequenceClassification + +>>> model = AutoModelForSequenceClassification.from_pretrained(model_name) +>>> tokenizer = AutoTokenizer.from_pretrained(model_name) +``` + + +Use the [`TFAutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the pretrained model and it's associated tokenizer (more on an `TFAutoClass` below): + +```py +>>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification + +>>> model = TFAutoModelForSequenceClassification.from_pretrained(model_name) +>>> tokenizer = AutoTokenizer.from_pretrained(model_name) +``` + + + +Dann können Sie das Modell und den Tokenizer in der [`pipeline`] angeben und den `Klassifikator` auf Ihren Zieltext anwenden: + +```py +>>> classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer) +>>> classifier("Nous sommes très heureux de vous présenter la bibliothèque 🤗 Transformers.") +[{'label': '5 stars', 'score': 0.7273}] +``` + +Wenn Sie kein Modell für Ihren Anwendungsfall finden können, müssen Sie ein vortrainiertes Modell auf Ihren Daten feinabstimmen. Schauen Sie sich unser [Feinabstimmungs-Tutorial](./training) an, um zu erfahren, wie das geht. Und schließlich, nachdem Sie Ihr trainiertes Modell verfeinert haben, sollten Sie es mit der Community im Model Hub teilen (siehe Tutorial [hier](./model_sharing)), um NLP für alle zu demokratisieren! 🤗 + +## AutoClass + + + +Unter der Haube arbeiten die Klassen [`AutoModelForSequenceClassification`] und [`AutoTokenizer`] zusammen, um die [`pipeline`] zu betreiben. Eine [`AutoClass`](./model_doc/auto) ist eine Abkürzung, die automatisch die Architektur eines trainierten Modells aus dessen Namen oder Pfad abruft. Sie müssen nur die passende `AutoClass` für Ihre Aufgabe und den zugehörigen Tokenizer mit [`AutoTokenizer`] auswählen. + +Kehren wir zu unserem Beispiel zurück und sehen wir uns an, wie Sie die `AutoClass` verwenden können, um die Ergebnisse der [`pipeline`] zu replizieren. + +### AutoTokenizer + +Ein Tokenizer ist für die Vorverarbeitung von Text in ein für das Modell verständliches Format zuständig. Zunächst zerlegt der Tokenisierer den Text in Wörter, die *Token* genannt werden. Es gibt mehrere Regeln für den Tokenisierungsprozess, z. B. wie und auf welcher Ebene ein Wort aufgespalten wird (weitere Informationen über Tokenisierung [hier](./tokenizer_summary)). Das Wichtigste ist jedoch, dass Sie den Tokenizer mit demselben Modellnamen instanziieren müssen, um sicherzustellen, dass Sie dieselben Tokenisierungsregeln verwenden, mit denen ein Modell zuvor trainiert wurde. +Laden sie einen Tokenizer mit [`AutoTokenizer`]: + +```py +>>> from transformers import AutoTokenizer + +>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" +>>> tokenizer = AutoTokenizer.from_pretrained(model_name) +``` + +Anschließend wandelt der Tokenizer die Token in Zahlen um, um einen Tensor als Eingabe für das Modell zu konstruieren. Dieser wird als *Vokabular* des Modells bezeichnet. + +Übergeben Sie Ihren Text an den Tokenizer: + +```py +>>> encoding = tokenizer("We are very happy to show you the 🤗 Transformers library.") +>>> print(encoding) +{'input_ids': [101, 11312, 10320, 12495, 19308, 10114, 11391, 10855, 10103, 100, 58263, 13299, 119, 102], + 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} +``` + +Der Tokenizer gibt ein Wörterbuch zurück, das Folgendes enthält: + +* [input_ids](./glossary#input-ids): numerische Repräsentationen Ihrer Token. +* [atttention_mask](.glossary#attention-mask): gibt an, welche Token beachtet werden sollen. + +Genau wie die [`pipeline`] akzeptiert der Tokenizer eine Liste von Eingaben. Darüber hinaus kann der Tokenizer den Text auch auffüllen und kürzen, um einen Stapel mit einheitlicher Länge zurückzugeben: + + + +```py +>>> pt_batch = tokenizer( +... ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."], +... padding=True, +... truncation=True, +... max_length=512, +... return_tensors="pt", +... ) +``` + + +```py +>>> tf_batch = tokenizer( +... ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."], +... padding=True, +... truncation=True, +... max_length=512, +... return_tensors="tf", +... ) +``` + + + +Lesen Sie das Tutorial [preprocessing](./preprocessing) für weitere Details zur Tokenisierung. + +### AutoModel + + + +🤗 Transformers bietet eine einfache und einheitliche Möglichkeit, vortrainierte Instanzen zu laden. Das bedeutet, dass Sie ein [`AutoModel`] laden können, wie Sie einen [`AutoTokenizer`] laden würden. Der einzige Unterschied ist die Auswahl des richtigen [`AutoModel`] für die Aufgabe. Da Sie eine Text- oder Sequenzklassifizierung vornehmen, laden Sie [`AutoModelForSequenceClassification`]: + +```py +>>> from transformers import AutoModelForSequenceClassification + +>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" +>>> pt_model = AutoModelForSequenceClassification.from_pretrained(model_name) +``` + + + +In der [Aufgabenzusammenfassung](./task_summary) steht, welche [AutoModel]-Klasse für welche Aufgabe zu verwenden ist. + + + +Jetzt können Sie Ihren vorverarbeiteten Stapel von Eingaben direkt an das Modell übergeben. Sie müssen nur das Wörterbuch entpacken, indem Sie `**` hinzufügen: + +```py +>>> pt_outputs = pt_model(**pt_batch) +``` + +Das Modell gibt die endgültigen Aktivierungen in dem Attribut "logits" aus. Wenden Sie die Softmax-Funktion auf die "logits" an, um die Wahrscheinlichkeiten zu erhalten: + +```py +>>> from torch import nn + +>>> pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1) +>>> print(pt_predictions) +tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725], + [0.2084, 0.1826, 0.1969, 0.1755, 0.2365]], grad_fn=) +``` + + +🤗 Transformers bietet eine einfache und einheitliche Methode zum Laden von vortrainierten Instanzen. Das bedeutet, dass Sie ein [`TFAutoModel`] genauso laden können, wie Sie einen [`AutoTokenizer`] laden würden. Der einzige Unterschied ist die Auswahl des richtigen [`TFAutoModel`] für die Aufgabe. Da Sie Text - oder Sequenz - Klassifizierung machen, laden Sie [`TFAutoModelForSequenceClassification`]: + +```py +>>> from transformers import TFAutoModelForSequenceClassification + +>>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" +>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name) +``` + + + +In der [Aufgabenzusammenfassung](./task_summary) steht, welche [AutoModel]-Klasse für welche Aufgabe zu verwenden ist. + + + +Jetzt können Sie Ihren vorverarbeiteten Stapel von Eingaben direkt an das Modell übergeben, indem Sie die Wörterbuchschlüssel direkt an die Tensoren übergeben: + +```py +>>> tf_outputs = tf_model(tf_batch) +``` + +Das Modell gibt die endgültigen Aktivierungen in dem Attribut "logits" aus. Wenden Sie die Softmax-Funktion auf die "logits" an, um die Wahrscheinlichkeiten zu erhalten: + +```py +>>> import tensorflow as tf + +>>> tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1) +>>> tf_predictions # doctest: +IGNORE_RESULT +``` + + + + + +Alle 🤗 Transformers-Modelle (PyTorch oder TensorFlow) geben die Tensoren *vor* der endgültigen Aktivierungsfunktion +Funktion (wie Softmax) aus, da die endgültige Aktivierungsfunktion oft mit dem Verlusten verschmolzen ist. + + + +Modelle sind ein standardmäßiges [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) oder ein [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model), sodass Sie sie in Ihrer üblichen Trainingsschleife verwenden können. Um jedoch die Dinge einfacher zu machen, bietet 🤗 Transformers eine [`Trainer`]-Klasse für PyTorch, die Funktionalität für verteiltes Training, gemischte Präzision und mehr bietet. Für TensorFlow können Sie die Methode `fit` aus [Keras](https://keras.io/) verwenden. Siehe das [training tutorial](./training) für weitere Details. + + + +Transformers-Modellausgaben sind spezielle Datenklassen, so dass ihre Attribute in einer IDE automatisch vervollständigt werden. +Die Modellausgänge verhalten sich auch wie ein Tupel oder ein Wörterbuch (z.B. können Sie mit einem Integer, einem Slice oder einem String indexieren), wobei die Attribute, die "None" sind, ignoriert werden. + + + +### Modell speichern + + + +Sobald Ihr Modell feinabgestimmt ist, können Sie es mit seinem Tokenizer speichern, indem Sie [`PreTrainedModel.save_pretrained`] verwenden: + +```py +>>> pt_save_directory = "./pt_save_pretrained" +>>> tokenizer.save_pretrained(pt_save_directory) # doctest: +IGNORE_RESULT +>>> pt_model.save_pretrained(pt_save_directory) +``` + +Wenn Sie bereit sind, das Modell erneut zu verwenden, laden Sie es mit [`PreTrainedModel.from_pretrained`]: + +```py +>>> pt_model = AutoModelForSequenceClassification.from_pretrained("./pt_save_pretrained") +``` + + +Sobald Ihr Modell feinabgestimmt ist, können Sie es mit seinem Tokenizer unter Verwendung von [`TFPreTrainedModel.save_pretrained`] speichern: + +```py +>>> tf_save_directory = "./tf_save_pretrained" +>>> tokenizer.save_pretrained(tf_save_directory) # doctest: +IGNORE_RESULT +>>> tf_model.save_pretrained(tf_save_directory) +``` + +Wenn Sie bereit sind, das Modell wieder zu verwenden, laden Sie es mit [`TFPreTrainedModel.from_pretrained`]: + +```py +>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained("./tf_save_pretrained") +``` + + + +Ein besonders cooles 🤗 Transformers-Feature ist die Möglichkeit, ein Modell zu speichern und es entweder als PyTorch- oder TensorFlow-Modell wieder zu laden. Der Parameter "from_pt" oder "from_tf" kann das Modell von einem Framework in das andere konvertieren: + + + +```py +>>> from transformers import AutoModel + +>>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory) +>>> pt_model = AutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True) +``` + + +```py +>>> from transformers import TFAutoModel + +>>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory) +>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True) +``` + + + +## Custom model builds + +Sie können die Konfigurationsklasse des Modells ändern, um zu bestimmen, wie ein Modell aufgebaut ist. Die Konfiguration legt die Attribute eines Modells fest, z. B. die Anzahl der verborgenen Schichten oder der Aufmerksamkeitsköpfe. Wenn Sie ein Modell aus einer benutzerdefinierten Konfigurationsklasse initialisieren, beginnen Sie bei Null. Die Modellattribute werden zufällig initialisiert, und Sie müssen das Modell trainieren, bevor Sie es verwenden können, um aussagekräftige Ergebnisse zu erhalten. + +Beginnen Sie mit dem Import von [`AutoConfig`] und laden Sie dann das trainierte Modell, das Sie ändern möchten. Innerhalb von [`AutoConfig.from_pretrained`] können Sie das Attribut angeben, das Sie ändern möchten, z. B. die Anzahl der Aufmerksamkeitsköpfe: + +```py +>>> from transformers import AutoConfig + +>>> my_config = AutoConfig.from_pretrained("distilbert-base-uncased", n_heads=12) +``` + + + +Create a model from your custom configuration with [`AutoModel.from_config`]: + +```py +>>> from transformers import AutoModel + +>>> my_model = AutoModel.from_config(my_config) +``` + + +Create a model from your custom configuration with [`TFAutoModel.from_config`]: + +```py +>>> from transformers import TFAutoModel + +>>> my_model = TFAutoModel.from_config(my_config) +``` + + + +Weitere Informationen zur Erstellung von benutzerdefinierten Konfigurationen finden Sie in der Anleitung [Erstellen einer benutzerdefinierten Architektur](./create_a_model). + +## Wie geht es weiter? + +Nachdem Sie nun die 🤗 Transformers-Kurztour abgeschlossen haben, schauen Sie sich unsere Anleitungen an und erfahren Sie, wie Sie spezifischere Dinge tun können, wie das Schreiben eines benutzerdefinierten Modells, die Feinabstimmung eines Modells für eine Aufgabe und wie man ein Modell mit einem Skript trainiert. Wenn Sie mehr über die Kernkonzepte von 🤗 Transformers erfahren möchten, nehmen Sie sich eine Tasse Kaffee und werfen Sie einen Blick auf unsere konzeptionellen Leitfäden! From d53dffec6ef5f0cf28df3a1e7f70f1c5da5762ce Mon Sep 17 00:00:00 2001 From: iiLaurens Date: Thu, 11 Aug 2022 15:54:43 +0200 Subject: [PATCH 078/539] Deberta V2: Fix critical trace warnings to allow ONNX export (#18272) * Fix critical trace warnings to allow ONNX export * Force input to `sqrt` to be float type * Cleanup code * Remove unused import statement * Update model sew * Small refactor Co-authored-by: Michael Benayoun * Use broadcasting instead of repeat * Implement suggestion Co-authored-by: Michael Benayoun * Match deberta v2 changes in sew_d * Improve code quality * Update code quality * Consistency of small refactor * Match changes in sew_d Co-authored-by: Michael Benayoun --- .../models/deberta_v2/modeling_deberta_v2.py | 30 +++++++++++-------- .../models/sew_d/modeling_sew_d.py | 28 ++++++++++------- 2 files changed, 34 insertions(+), 24 deletions(-) diff --git a/src/transformers/models/deberta_v2/modeling_deberta_v2.py b/src/transformers/models/deberta_v2/modeling_deberta_v2.py index a513a8280ed51d..3243ee108d488b 100644 --- a/src/transformers/models/deberta_v2/modeling_deberta_v2.py +++ b/src/transformers/models/deberta_v2/modeling_deberta_v2.py @@ -14,11 +14,9 @@ # limitations under the License. """ PyTorch DeBERTa-v2 model.""" -import math from collections.abc import Sequence from typing import Optional, Tuple, Union -import numpy as np import torch import torch.utils.checkpoint from torch import nn @@ -552,11 +550,17 @@ def custom_forward(*inputs): def make_log_bucket_position(relative_pos, bucket_size, max_position): - sign = np.sign(relative_pos) + sign = torch.sign(relative_pos) mid = bucket_size // 2 - abs_pos = np.where((relative_pos < mid) & (relative_pos > -mid), mid - 1, np.abs(relative_pos)) - log_pos = np.ceil(np.log(abs_pos / mid) / np.log((max_position - 1) / mid) * (mid - 1)) + mid - bucket_pos = np.where(abs_pos <= mid, relative_pos, log_pos * sign).astype(np.int) + abs_pos = torch.where( + (relative_pos < mid) & (relative_pos > -mid), + torch.tensor(mid - 1).type_as(relative_pos), + torch.abs(relative_pos), + ) + log_pos = ( + torch.ceil(torch.log(abs_pos / mid) / torch.log(torch.tensor((max_position - 1) / mid)) * (mid - 1)) + mid + ) + bucket_pos = torch.where(abs_pos <= mid, relative_pos.type_as(log_pos), log_pos * sign) return bucket_pos @@ -578,12 +582,12 @@ def build_relative_position(query_size, key_size, bucket_size=-1, max_position=- `torch.LongTensor`: A tensor with shape [1, query_size, key_size] """ - q_ids = np.arange(0, query_size) - k_ids = np.arange(0, key_size) - rel_pos_ids = q_ids[:, None] - np.tile(k_ids, (q_ids.shape[0], 1)) + q_ids = torch.arange(0, query_size) + k_ids = torch.arange(0, key_size) + rel_pos_ids = q_ids[:, None] - k_ids[None, :] if bucket_size > 0 and max_position > 0: rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position) - rel_pos_ids = torch.tensor(rel_pos_ids, dtype=torch.long) + rel_pos_ids = rel_pos_ids.to(torch.long) rel_pos_ids = rel_pos_ids[:query_size, :] rel_pos_ids = rel_pos_ids.unsqueeze(0) return rel_pos_ids @@ -712,7 +716,7 @@ def forward( scale_factor += 1 if "p2c" in self.pos_att_type: scale_factor += 1 - scale = math.sqrt(query_layer.size(-1) * scale_factor) + scale = torch.sqrt(torch.tensor(query_layer.size(-1), dtype=torch.float) * scale_factor) attention_scores = torch.bmm(query_layer, key_layer.transpose(-1, -2)) / scale if self.relative_attention: rel_embeddings = self.pos_dropout(rel_embeddings) @@ -787,7 +791,7 @@ def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_ score = 0 # content->position if "c2p" in self.pos_att_type: - scale = math.sqrt(pos_key_layer.size(-1) * scale_factor) + scale = torch.sqrt(torch.tensor(pos_key_layer.size(-1), dtype=torch.float) * scale_factor) c2p_att = torch.bmm(query_layer, pos_key_layer.transpose(-1, -2)) c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1) c2p_att = torch.gather( @@ -799,7 +803,7 @@ def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_ # position->content if "p2c" in self.pos_att_type: - scale = math.sqrt(pos_query_layer.size(-1) * scale_factor) + scale = torch.sqrt(torch.tensor(pos_query_layer.size(-1), dtype=torch.float) * scale_factor) if key_layer.size(-2) != query_layer.size(-2): r_pos = build_relative_position( key_layer.size(-2), diff --git a/src/transformers/models/sew_d/modeling_sew_d.py b/src/transformers/models/sew_d/modeling_sew_d.py index a9a231aec1d8e6..fe5836a80f36e0 100644 --- a/src/transformers/models/sew_d/modeling_sew_d.py +++ b/src/transformers/models/sew_d/modeling_sew_d.py @@ -194,11 +194,17 @@ def compute_num_masked_span(input_length): # Copied from transformers.models.deberta_v2.modeling_deberta_v2.make_log_bucket_position def make_log_bucket_position(relative_pos, bucket_size, max_position): - sign = np.sign(relative_pos) + sign = torch.sign(relative_pos) mid = bucket_size // 2 - abs_pos = np.where((relative_pos < mid) & (relative_pos > -mid), mid - 1, np.abs(relative_pos)) - log_pos = np.ceil(np.log(abs_pos / mid) / np.log((max_position - 1) / mid) * (mid - 1)) + mid - bucket_pos = np.where(abs_pos <= mid, relative_pos, log_pos * sign).astype(np.int) + abs_pos = torch.where( + (relative_pos < mid) & (relative_pos > -mid), + torch.tensor(mid - 1).type_as(relative_pos), + torch.abs(relative_pos), + ) + log_pos = ( + torch.ceil(torch.log(abs_pos / mid) / torch.log(torch.tensor((max_position - 1) / mid)) * (mid - 1)) + mid + ) + bucket_pos = torch.where(abs_pos <= mid, relative_pos.type_as(log_pos), log_pos * sign) return bucket_pos @@ -221,12 +227,12 @@ def build_relative_position(query_size, key_size, bucket_size=-1, max_position=- `torch.LongTensor`: A tensor with shape [1, query_size, key_size] """ - q_ids = np.arange(0, query_size) - k_ids = np.arange(0, key_size) - rel_pos_ids = q_ids[:, None] - np.tile(k_ids, (q_ids.shape[0], 1)) + q_ids = torch.arange(0, query_size) + k_ids = torch.arange(0, key_size) + rel_pos_ids = q_ids[:, None] - k_ids[None, :] if bucket_size > 0 and max_position > 0: rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position) - rel_pos_ids = torch.tensor(rel_pos_ids, dtype=torch.long) + rel_pos_ids = rel_pos_ids.to(torch.long) rel_pos_ids = rel_pos_ids[:query_size, :] rel_pos_ids = rel_pos_ids.unsqueeze(0) return rel_pos_ids @@ -784,7 +790,7 @@ def forward( scale_factor += 1 if "p2c" in self.pos_att_type: scale_factor += 1 - scale = math.sqrt(query_layer.size(-1) * scale_factor) + scale = torch.sqrt(torch.tensor(query_layer.size(-1), dtype=torch.float) * scale_factor) attention_scores = torch.bmm(query_layer, key_layer.transpose(-1, -2)) / scale if self.relative_attention: rel_embeddings = self.pos_dropout(rel_embeddings) @@ -859,7 +865,7 @@ def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_ score = 0 # content->position if "c2p" in self.pos_att_type: - scale = math.sqrt(pos_key_layer.size(-1) * scale_factor) + scale = torch.sqrt(torch.tensor(pos_key_layer.size(-1), dtype=torch.float) * scale_factor) c2p_att = torch.bmm(query_layer, pos_key_layer.transpose(-1, -2)) c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1) c2p_att = torch.gather( @@ -871,7 +877,7 @@ def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_ # position->content if "p2c" in self.pos_att_type: - scale = math.sqrt(pos_query_layer.size(-1) * scale_factor) + scale = torch.sqrt(torch.tensor(pos_query_layer.size(-1), dtype=torch.float) * scale_factor) if key_layer.size(-2) != query_layer.size(-2): r_pos = build_relative_position( key_layer.size(-2), From 42b8940b346fa99622c52a91d2704dbe8afb20cb Mon Sep 17 00:00:00 2001 From: Michael Benayoun Date: Thu, 11 Aug 2022 16:34:44 +0200 Subject: [PATCH 079/539] [FX] _generate_dummy_input supports audio-classification models for labels (#18580) * Support audio classification architectures for labels generation, as well as provides a flag to print warnings or not * Use ENV_VARS_TRUE_VALUES --- src/transformers/utils/fx.py | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/src/transformers/utils/fx.py b/src/transformers/utils/fx.py index 2198928eadb325..990f278b0d5066 100644 --- a/src/transformers/utils/fx.py +++ b/src/transformers/utils/fx.py @@ -19,6 +19,7 @@ import inspect import math import operator +import os import random import warnings from typing import Any, Callable, Dict, List, Optional, Type, Union @@ -48,11 +49,12 @@ MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_MAPPING_NAMES, ) -from ..utils import TORCH_FX_REQUIRED_VERSION, is_torch_fx_available +from ..utils import ENV_VARS_TRUE_VALUES, TORCH_FX_REQUIRED_VERSION, is_torch_fx_available from ..utils.versions import importlib_metadata logger = logging.get_logger(__name__) +_IS_IN_DEBUG_MODE = os.environ.get("FX_DEBUG_MODE", "").upper() in ENV_VARS_TRUE_VALUES def _generate_supported_model_class_names( @@ -678,7 +680,12 @@ def _generate_dummy_input( if input_name in ["labels", "start_positions", "end_positions"]: batch_size = shape[0] - if model_class_name in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES): + if model_class_name in [ + *get_values(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES), + *get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES), + *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES), + *get_values(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES), + ]: inputs_dict["labels"] = torch.zeros(batch_size, dtype=torch.long, device=device) elif model_class_name in [ *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES), @@ -710,11 +717,6 @@ def _generate_dummy_input( ) inputs_dict["labels"] = torch.zeros(*labels_shape, dtype=labels_dtype, device=device) - elif model_class_name in [ - *get_values(MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES), - *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES), - ]: - inputs_dict["labels"] = torch.zeros(batch_size, dtype=torch.long, device=device) elif model_class_name in [ *get_values(MODEL_FOR_PRETRAINING_MAPPING_NAMES), *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES), @@ -725,7 +727,9 @@ def _generate_dummy_input( ]: inputs_dict["labels"] = torch.zeros(shape, dtype=torch.long, device=device) else: - raise NotImplementedError(f"{model_class_name} not supported yet.") + raise NotImplementedError( + f"Generating the dummy input named {input_name} for {model_class_name} is not supported yet." + ) elif "pixel_values" in input_name: batch_size = shape[0] image_size = getattr(model.config, "image_size", None) @@ -846,7 +850,8 @@ def create_proxy(self, kind, target, args, kwargs, name=None, type_expr=None, pr raise ValueError("Don't support composite output yet") rv.install_metadata(meta_out) except Exception as e: - warnings.warn(f"Could not compute metadata for {kind} target {target}: {e}") + if _IS_IN_DEBUG_MODE: + warnings.warn(f"Could not compute metadata for {kind} target {target}: {e}") return rv From c23cbdff4c097d3f3039999827a675cf8f06a32e Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Thu, 11 Aug 2022 10:35:47 -0400 Subject: [PATCH 080/539] Fix docstrings with last version of hf-doc-builder styler (#18581) * Fix docstrings with last version of hf-doc-builder styler * Remove empty Parameter block --- src/transformers/benchmark/benchmark_utils.py | 5 ----- src/transformers/generation_flax_utils.py | 1 - src/transformers/generation_tf_utils.py | 5 ----- src/transformers/generation_utils.py | 5 ----- src/transformers/modelcard.py | 2 -- src/transformers/models/auto/auto_factory.py | 1 - src/transformers/models/flaubert/tokenization_flaubert.py | 1 - src/transformers/models/fsmt/tokenization_fsmt.py | 1 - src/transformers/models/perceiver/modeling_perceiver.py | 1 - src/transformers/models/tapex/tokenization_tapex.py | 1 - src/transformers/models/transfo_xl/modeling_transfo_xl.py | 1 - src/transformers/models/xlm/tokenization_xlm.py | 1 - src/transformers/testing_utils.py | 1 - src/transformers/trainer_pt_utils.py | 1 - src/transformers/trainer_utils.py | 1 - src/transformers/utils/notebook.py | 2 -- 16 files changed, 30 deletions(-) diff --git a/src/transformers/benchmark/benchmark_utils.py b/src/transformers/benchmark/benchmark_utils.py index 36fe5eb116cbef..79740805807185 100644 --- a/src/transformers/benchmark/benchmark_utils.py +++ b/src/transformers/benchmark/benchmark_utils.py @@ -79,7 +79,6 @@ def separate_process_wrapper_fn(func: Callable[[], None], do_multi_processing: b measurements it is important that the function is executed in a separate process Args: - - `func`: (`callable`): function() -> ... generic function which will be executed in its own separate process - `do_multi_processing`: (`bool`) Whether to run function on separate process or not """ @@ -210,7 +209,6 @@ def measure_peak_memory_cpu(function: Callable[[], None], interval=0.5, device_i https://github.com/pythonprofilers/memory_profiler/blob/895c4ac7a08020d66ae001e24067da6dcea42451/memory_profiler.py#L239 Args: - - `function`: (`callable`): function() -> ... function without any arguments to measure for which to measure the peak memory @@ -228,7 +226,6 @@ def get_cpu_memory(process_id: int) -> int: measures current cpu memory usage of a given `process_id` Args: - - `process_id`: (`int`) process_id for which to measure memory Returns @@ -336,7 +333,6 @@ def start_memory_tracing( https://psutil.readthedocs.io/en/latest/#psutil.Process.memory_info Args: - - `modules_to_trace`: (None, string, list/tuple of string) if None, all events are recorded if string or list of strings: only events from the listed module/sub-module will be recorded (e.g. 'fairseq' or 'transformers.models.gpt2.modeling_gpt2') @@ -483,7 +479,6 @@ def stop_memory_tracing( Stop memory tracing cleanly and return a summary of the memory trace if a trace is given. Args: - `memory_trace` (optional output of start_memory_tracing, default: None): memory trace to convert in summary `ignore_released_memory` (boolean, default: None): diff --git a/src/transformers/generation_flax_utils.py b/src/transformers/generation_flax_utils.py index 2f80c7fcf27e96..fd26a605c48bac 100644 --- a/src/transformers/generation_flax_utils.py +++ b/src/transformers/generation_flax_utils.py @@ -208,7 +208,6 @@ def generate( post](https://huggingface.co/blog/how-to-generate). Parameters: - input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. max_length (`int`, *optional*, defaults to `model.config.max_length`): diff --git a/src/transformers/generation_tf_utils.py b/src/transformers/generation_tf_utils.py index a3d26b789c646e..6c8da54835ac92 100644 --- a/src/transformers/generation_tf_utils.py +++ b/src/transformers/generation_tf_utils.py @@ -418,7 +418,6 @@ def generate( post](https://huggingface.co/blog/how-to-generate). Parameters: - input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, `(batch_size, sequence_length, feature_dim)` or `(batch_size, num_channels, height, width)`, *optional*): The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the @@ -1336,7 +1335,6 @@ def _generate( post](https://huggingface.co/blog/how-to-generate). Parameters: - input_ids (`tf.Tensor` of `dtype=tf.int32` and shape `(batch_size, sequence_length)`, *optional*): The sequence used as a prompt for the generation. If `None` the method initializes it with `bos_token_id` and a batch size of 1. @@ -2070,7 +2068,6 @@ def greedy_search( Generates sequences for models with a language modeling head using greedy decoding. Parameters: - input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. logits_processor (`TFLogitsProcessorList`, *optional*): @@ -2323,7 +2320,6 @@ def sample( Generates sequences for models with a language modeling head using multinomial sampling. Parameters: - input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. logits_processor (`TFLogitsProcessorList`, *optional*): @@ -2600,7 +2596,6 @@ def beam_search( Generates sequences for models with a language modeling head using beam search with multinomial sampling. Parameters: - input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. max_length (`int`, *optional*, defaults to 20): diff --git a/src/transformers/generation_utils.py b/src/transformers/generation_utils.py index bb9330de37f0cf..8f6dc6a383a774 100644 --- a/src/transformers/generation_utils.py +++ b/src/transformers/generation_utils.py @@ -1555,7 +1555,6 @@ def greedy_search( used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: - input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. logits_processor (`LogitsProcessorList`, *optional*): @@ -1789,7 +1788,6 @@ def sample( can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: - input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. logits_processor (`LogitsProcessorList`, *optional*): @@ -2046,7 +2044,6 @@ def beam_search( can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: - input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. beam_scorer (`BeamScorer`): @@ -2355,7 +2352,6 @@ def beam_sample( sampling** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: - input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. beam_scorer (`BeamScorer`): @@ -2672,7 +2668,6 @@ def group_beam_search( decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. Parameters: - input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): The sequence used as a prompt for the generation. beam_scorer (`BeamScorer`): diff --git a/src/transformers/modelcard.py b/src/transformers/modelcard.py index dc842c2abbf72c..6743c5624eaf3c 100644 --- a/src/transformers/modelcard.py +++ b/src/transformers/modelcard.py @@ -80,8 +80,6 @@ class ModelCard: Inioluwa Deborah Raji and Timnit Gebru for the proposal behind model cards. Link: https://arxiv.org/abs/1810.03993 Note: A model card can be loaded and saved to disk. - - Parameters: """ def __init__(self, **kwargs): diff --git a/src/transformers/models/auto/auto_factory.py b/src/transformers/models/auto/auto_factory.py index b412f14157f1c3..8d3fabda4706eb 100644 --- a/src/transformers/models/auto/auto_factory.py +++ b/src/transformers/models/auto/auto_factory.py @@ -563,7 +563,6 @@ class _LazyAutoMapping(OrderedDict): " A mapping config to object (model or tokenizer for instance) that will load keys and values when it is accessed. Args: - - config_mapping: The map model type to config class - model_mapping: The map model type to model (or tokenizer) class """ diff --git a/src/transformers/models/flaubert/tokenization_flaubert.py b/src/transformers/models/flaubert/tokenization_flaubert.py index 5d5ad2a657d1bc..911ef37dac5046 100644 --- a/src/transformers/models/flaubert/tokenization_flaubert.py +++ b/src/transformers/models/flaubert/tokenization_flaubert.py @@ -130,7 +130,6 @@ def _tokenize(self, text, bypass_tokenizer=False): - Install with `pip install sacremoses` Args: - - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False) (bool). If True, we only apply BPE. diff --git a/src/transformers/models/fsmt/tokenization_fsmt.py b/src/transformers/models/fsmt/tokenization_fsmt.py index 34272e53cf0fcb..66d9819785483c 100644 --- a/src/transformers/models/fsmt/tokenization_fsmt.py +++ b/src/transformers/models/fsmt/tokenization_fsmt.py @@ -354,7 +354,6 @@ def _tokenize(self, text, lang="en", bypass_tokenizer=False): - Install with `pip install sacremoses` Args: - - lang: ISO language code (default = 'en') (string). Languages should belong of the model supported languages. However, we don't enforce it. - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False) diff --git a/src/transformers/models/perceiver/modeling_perceiver.py b/src/transformers/models/perceiver/modeling_perceiver.py index b3a0beea3d3ca4..d069182f06c3c7 100755 --- a/src/transformers/models/perceiver/modeling_perceiver.py +++ b/src/transformers/models/perceiver/modeling_perceiver.py @@ -1960,7 +1960,6 @@ def build_position_encoding( Builds the position encoding. Args: - - out_channels: refers to the number of channels of the position encodings. - project_pos_dim: if specified, will project the position encodings to this dimension. diff --git a/src/transformers/models/tapex/tokenization_tapex.py b/src/transformers/models/tapex/tokenization_tapex.py index 7c0725ffe7c108..555bf9fd2c6b9a 100644 --- a/src/transformers/models/tapex/tokenization_tapex.py +++ b/src/transformers/models/tapex/tokenization_tapex.py @@ -1398,7 +1398,6 @@ def truncate_table_rows( ): """ Args: - table_content: {"header": xxx, "rows": xxx, "id" (Optionally): xxx} diff --git a/src/transformers/models/transfo_xl/modeling_transfo_xl.py b/src/transformers/models/transfo_xl/modeling_transfo_xl.py index 75793466c7a8d1..257c45af03bbc0 100644 --- a/src/transformers/models/transfo_xl/modeling_transfo_xl.py +++ b/src/transformers/models/transfo_xl/modeling_transfo_xl.py @@ -523,7 +523,6 @@ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, layer: O weights embeddings afterwards if the model class has a *tie_weights()* method. Arguments: - new_num_tokens: (*optional*) int: New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or None: does nothing and diff --git a/src/transformers/models/xlm/tokenization_xlm.py b/src/transformers/models/xlm/tokenization_xlm.py index bd7b58eb053b0e..8bb021c5b96987 100644 --- a/src/transformers/models/xlm/tokenization_xlm.py +++ b/src/transformers/models/xlm/tokenization_xlm.py @@ -791,7 +791,6 @@ def _tokenize(self, text, lang="en", bypass_tokenizer=False): externally, and set `bypass_tokenizer=True` to bypass the tokenizer. Args: - - lang: ISO language code (default = 'en') (string). Languages should belong of the model supported languages. However, we don't enforce it. - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False) diff --git a/src/transformers/testing_utils.py b/src/transformers/testing_utils.py index d21f353a60a8f5..2e99a76232c27c 100644 --- a/src/transformers/testing_utils.py +++ b/src/transformers/testing_utils.py @@ -1286,7 +1286,6 @@ def pytest_terminal_summary_main(tr, id): there. Args: - - tr: `terminalreporter` passed from `conftest.py` - id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other. diff --git a/src/transformers/trainer_pt_utils.py b/src/transformers/trainer_pt_utils.py index e1ad471b07a9e0..57103b50d5a039 100644 --- a/src/transformers/trainer_pt_utils.py +++ b/src/transformers/trainer_pt_utils.py @@ -377,7 +377,6 @@ class DistributedTensorGatherer: For some reason, that's not going to roll their boat. This class is there to solve that problem. Args: - world_size (`int`): The number of processes used in the distributed training. num_samples (`int`): diff --git a/src/transformers/trainer_utils.py b/src/transformers/trainer_utils.py index 579e5d1dc24ce4..a298fc1de5719e 100644 --- a/src/transformers/trainer_utils.py +++ b/src/transformers/trainer_utils.py @@ -337,7 +337,6 @@ def speed_metrics(split, start_time, num_samples=None, num_steps=None): should be run immediately after the operation to be measured has completed. Args: - - split: name to prefix metric (like train, eval, test...) - start_time: operation start time - num_samples: number of samples processed diff --git a/src/transformers/utils/notebook.py b/src/transformers/utils/notebook.py index 8d81d76c4fd166..636cf785ea94ea 100644 --- a/src/transformers/utils/notebook.py +++ b/src/transformers/utils/notebook.py @@ -120,7 +120,6 @@ def update(self, value: int, force_update: bool = False, comment: str = None): The main method to update the progress bar to `value`. Args: - value (`int`): The value to use. Must be between 0 and `total`. force_update (`bool`, *optional*, defaults to `False`): @@ -204,7 +203,6 @@ class NotebookTrainingTracker(NotebookProgressBar): An object tracking the updates of an ongoing training with progress bars and a nice table reporting metrics. Args: - num_steps (`int`): The number of steps during training. column_names (`List[str]`, *optional*): The list of column names for the metrics table (will be inferred from the first call to [`~utils.notebook.NotebookTrainingTracker.write_line`] if not set). From 713ab6fde510c555010e6ca30e77c8d423988844 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 11 Aug 2022 10:47:19 -0400 Subject: [PATCH 081/539] Bump nbconvert from 6.0.1 to 6.3.0 in /examples/research_projects/lxmert (#18565) Bumps [nbconvert](https://github.com/jupyter/nbconvert) from 6.0.1 to 6.3.0. - [Release notes](https://github.com/jupyter/nbconvert/releases) - [Commits](https://github.com/jupyter/nbconvert/compare/6.0.1...6.3.0) --- updated-dependencies: - dependency-name: nbconvert dependency-type: direct:production ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/research_projects/lxmert/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/research_projects/lxmert/requirements.txt b/examples/research_projects/lxmert/requirements.txt index 28a15ccb6ada21..9e7cc88ce0828f 100644 --- a/examples/research_projects/lxmert/requirements.txt +++ b/examples/research_projects/lxmert/requirements.txt @@ -43,7 +43,7 @@ matplotlib==3.3.1 mistune==2.0.3 msgpack==0.6.2 nbclient==0.5.0 -nbconvert==6.0.1 +nbconvert==6.3.0 nbformat==5.0.7 nest-asyncio==1.4.0 notebook==6.4.12 From 05d3a43c59dadf815fe46df8fed8cc8e816c8588 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 11 Aug 2022 10:47:31 -0400 Subject: [PATCH 082/539] Bump nbconvert in /examples/research_projects/visual_bert (#18566) Bumps [nbconvert](https://github.com/jupyter/nbconvert) from 6.0.1 to 6.3.0. - [Release notes](https://github.com/jupyter/nbconvert/releases) - [Commits](https://github.com/jupyter/nbconvert/compare/6.0.1...6.3.0) --- updated-dependencies: - dependency-name: nbconvert dependency-type: direct:production ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/research_projects/visual_bert/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/research_projects/visual_bert/requirements.txt b/examples/research_projects/visual_bert/requirements.txt index 28a15ccb6ada21..9e7cc88ce0828f 100644 --- a/examples/research_projects/visual_bert/requirements.txt +++ b/examples/research_projects/visual_bert/requirements.txt @@ -43,7 +43,7 @@ matplotlib==3.3.1 mistune==2.0.3 msgpack==0.6.2 nbclient==0.5.0 -nbconvert==6.0.1 +nbconvert==6.3.0 nbformat==5.0.7 nest-asyncio==1.4.0 notebook==6.4.12 From f28f240828d7d262767a29be080502ebc5f984fb Mon Sep 17 00:00:00 2001 From: Alara Dirik <8944735+alaradirik@users.noreply.github.com> Date: Thu, 11 Aug 2022 19:10:25 +0300 Subject: [PATCH 083/539] fix owlvit tests, update docstring examples (#18586) --- docs/source/en/model_doc/owlvit.mdx | 4 ++-- src/transformers/models/owlvit/modeling_owlvit.py | 4 ++-- tests/models/owlvit/test_modeling_owlvit.py | 7 +++---- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/docs/source/en/model_doc/owlvit.mdx b/docs/source/en/model_doc/owlvit.mdx index 0b61d7b274a0c7..ddbc2826d7a655 100644 --- a/docs/source/en/model_doc/owlvit.mdx +++ b/docs/source/en/model_doc/owlvit.mdx @@ -57,8 +57,8 @@ OWL-ViT is a zero-shot text-conditioned object detection model. OWL-ViT uses [CL ... box = [round(i, 2) for i in box.tolist()] ... if score >= score_threshold: ... print(f"Detected {text[label]} with confidence {round(score.item(), 3)} at location {box}") -Detected a photo of a cat with confidence 0.243 at location [1.42, 50.69, 308.58, 370.48] -Detected a photo of a cat with confidence 0.298 at location [348.06, 20.56, 642.33, 372.61] +Detected a photo of a cat with confidence 0.707 at location [324.97, 20.44, 640.58, 373.29] +Detected a photo of a cat with confidence 0.717 at location [1.46, 55.26, 315.55, 472.17] ``` This model was contributed by [adirik](https://huggingface.co/adirik). The original code can be found [here](https://github.com/google-research/scenic/tree/main/scenic/projects/owl_vit). diff --git a/src/transformers/models/owlvit/modeling_owlvit.py b/src/transformers/models/owlvit/modeling_owlvit.py index 73ee2597f1b163..c0386ab23d3fba 100644 --- a/src/transformers/models/owlvit/modeling_owlvit.py +++ b/src/transformers/models/owlvit/modeling_owlvit.py @@ -1323,8 +1323,8 @@ def forward( ... box = [round(i, 2) for i in box.tolist()] ... if score >= score_threshold: ... print(f"Detected {text[label]} with confidence {round(score.item(), 3)} at location {box}") - Detected a photo of a cat with confidence 0.243 at location [1.42, 50.69, 308.58, 370.48] - Detected a photo of a cat with confidence 0.298 at location [348.06, 20.56, 642.33, 372.61] + Detected a photo of a cat with confidence 0.707 at location [324.97, 20.44, 640.58, 373.29] + Detected a photo of a cat with confidence 0.717 at location [1.46, 55.26, 315.55, 472.17] ```""" output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states diff --git a/tests/models/owlvit/test_modeling_owlvit.py b/tests/models/owlvit/test_modeling_owlvit.py index 7564d192ad9898..e8f615ec8e54f0 100644 --- a/tests/models/owlvit/test_modeling_owlvit.py +++ b/tests/models/owlvit/test_modeling_owlvit.py @@ -733,7 +733,6 @@ def prepare_img(): @require_vision @require_torch -@unittest.skip("These tests are broken, fix me Alara") class OwlViTModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): @@ -763,8 +762,7 @@ def test_inference(self): outputs.logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) - expected_logits = torch.tensor([[4.4420, 0.6181]], device=torch_device) - + expected_logits = torch.tensor([[3.4613, 0.9403]], device=torch_device) self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)) @slow @@ -788,7 +786,8 @@ def test_inference_object_detection(self): num_queries = int((model.config.vision_config.image_size / model.config.vision_config.patch_size) ** 2) self.assertEqual(outputs.pred_boxes.shape, torch.Size((1, num_queries, 4))) + expected_slice_boxes = torch.tensor( - [[0.0948, 0.0471, 0.1915], [0.3194, 0.0583, 0.6498], [0.1441, 0.0452, 0.2197]] + [[0.0691, 0.0445, 0.1373], [0.1592, 0.0456, 0.3192], [0.1632, 0.0423, 0.2478]] ).to(torch_device) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)) From c8b6ae858d61e5bc10e388d095aa74f7690d1021 Mon Sep 17 00:00:00 2001 From: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Date: Thu, 11 Aug 2022 17:32:11 +0100 Subject: [PATCH 084/539] Return the permuted hidden states if return_dict=True (#18578) --- src/transformers/models/convnext/modeling_tf_convnext.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/transformers/models/convnext/modeling_tf_convnext.py b/src/transformers/models/convnext/modeling_tf_convnext.py index 405aeff6e0bdd5..0be2d291923812 100644 --- a/src/transformers/models/convnext/modeling_tf_convnext.py +++ b/src/transformers/models/convnext/modeling_tf_convnext.py @@ -330,7 +330,8 @@ def call( hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]]) if not return_dict: - return (last_hidden_state, pooled_output) + encoder_outputs[1:] + hidden_states = hidden_states if output_hidden_states else () + return (last_hidden_state, pooled_output) + hidden_states return TFBaseModelOutputWithPooling( last_hidden_state=last_hidden_state, From bce36ee065f7749c997be4c30a3f9279df7d5dba Mon Sep 17 00:00:00 2001 From: Arthur <48595927+ArthurZucker@users.noreply.github.com> Date: Fri, 12 Aug 2022 09:48:10 +0200 Subject: [PATCH 085/539] Load sharded pt to flax (#18419) * initial commit * add small test * add cross pt tf flag to test * fix quality * style * update test with new repo * fix failing test * update * fix wrong param ordering * style * update based on review * update related to recent new caching mechanism * quality * Update based on review Co-authored-by: sgugger * quality and style * Update src/transformers/modeling_flax_utils.py Co-authored-by: sgugger Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> --- .../modeling_flax_pytorch_utils.py | 74 +++++++++++++++++-- src/transformers/modeling_flax_utils.py | 20 ++++- tests/test_modeling_flax_common.py | 8 ++ 3 files changed, 94 insertions(+), 8 deletions(-) diff --git a/src/transformers/modeling_flax_pytorch_utils.py b/src/transformers/modeling_flax_pytorch_utils.py index a91d41b9d6d91b..76eaa53f89d04c 100644 --- a/src/transformers/modeling_flax_pytorch_utils.py +++ b/src/transformers/modeling_flax_pytorch_utils.py @@ -38,7 +38,9 @@ ##################### -def load_pytorch_checkpoint_in_flax_state_dict(flax_model, pytorch_checkpoint_path, allow_missing_keys=False): +def load_pytorch_checkpoint_in_flax_state_dict( + flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False +): """Load pytorch checkpoints in a flax model""" try: import torch # noqa: F401 @@ -50,14 +52,17 @@ def load_pytorch_checkpoint_in_flax_state_dict(flax_model, pytorch_checkpoint_pa ) raise - pt_path = os.path.abspath(pytorch_checkpoint_path) - logger.info(f"Loading PyTorch weights from {pt_path}") + if not is_sharded: + pt_path = os.path.abspath(pytorch_checkpoint_path) + logger.info(f"Loading PyTorch weights from {pt_path}") - pt_state_dict = torch.load(pt_path, map_location="cpu") - logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.") - - flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model) + pt_state_dict = torch.load(pt_path, map_location="cpu") + logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.") + flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model) + else: + # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files + flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model) return flax_state_dict @@ -156,6 +161,61 @@ def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model): return unflatten_dict(flax_state_dict) +############################ +# Sharded Pytorch => Flax # +############################ + + +def convert_pytorch_sharded_state_dict_to_flax(shard_filenames, flax_model): + import torch + + # Load the index + flax_state_dict = {} + for shard_file in shard_filenames: + # load using msgpack utils + pt_state_dict = torch.load(shard_file) + pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()} + + model_prefix = flax_model.base_model_prefix + random_flax_state_dict = flatten_dict(flax_model.params) + + load_model_with_head_into_base_model = (model_prefix not in flax_model.params) and ( + model_prefix in set([k.split(".")[0] for k in pt_state_dict.keys()]) + ) + load_base_model_into_model_with_head = (model_prefix in flax_model.params) and ( + model_prefix not in set([k.split(".")[0] for k in pt_state_dict.keys()]) + ) + # Need to change some parameters name to match Flax names + for pt_key, pt_tensor in pt_state_dict.items(): + + pt_tuple_key = tuple(pt_key.split(".")) + + # remove base model prefix if necessary + has_base_model_prefix = pt_tuple_key[0] == model_prefix + if load_model_with_head_into_base_model and has_base_model_prefix: + pt_tuple_key = pt_tuple_key[1:] + + # Correctly rename weight parameters + flax_key, flax_tensor = rename_key_and_reshape_tensor( + pt_tuple_key, pt_tensor, random_flax_state_dict, model_prefix + ) + # add model prefix if necessary + require_base_model_prefix = (model_prefix,) + flax_key in random_flax_state_dict + if load_base_model_into_model_with_head and require_base_model_prefix: + flax_key = (model_prefix,) + flax_key + + if flax_key in random_flax_state_dict: + if flax_tensor.shape != random_flax_state_dict[flax_key].shape: + raise ValueError( + f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape " + f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." + ) + + # also add unexpected weight so that warning is thrown + flax_state_dict[flax_key] = jnp.asarray(flax_tensor) + return unflatten_dict(flax_state_dict) + + ##################### # Flax => PyTorch # ##################### diff --git a/src/transformers/modeling_flax_utils.py b/src/transformers/modeling_flax_utils.py index 683e25631c0f44..00bb5480ffe3e9 100644 --- a/src/transformers/modeling_flax_utils.py +++ b/src/transformers/modeling_flax_utils.py @@ -40,6 +40,7 @@ from .utils import ( FLAX_WEIGHTS_INDEX_NAME, FLAX_WEIGHTS_NAME, + WEIGHTS_INDEX_NAME, WEIGHTS_NAME, PushToHubMixin, add_code_sample_docstrings, @@ -650,6 +651,10 @@ def from_pretrained( if from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)): # Load from a PyTorch checkpoint archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) + elif from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME)): + # Load from a sharded pytorch checkpoint + archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME) + is_sharded = True elif os.path.isfile(os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME)): # Load from a Flax checkpoint archive_file = os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME) @@ -700,6 +705,13 @@ def from_pretrained( ) if resolved_archive_file is not None: is_sharded = True + # Maybe the checkpoint is pytorch sharded, we try to grab the pytorch index name in this case. + elif resolved_archive_file is None and from_pt: + resolved_archive_file = cached_file( + pretrained_model_name_or_path, WEIGHTS_INDEX_NAME, **cached_file_kwargs + ) + if resolved_archive_file is not None: + is_sharded = True if resolved_archive_file is None: # Otherwise, maybe there is a TF or Flax model file. We try those to give a helpful error # message. @@ -714,6 +726,12 @@ def from_pretrained( f" {FLAX_WEIGHTS_NAME} but there is a file for PyTorch weights. Use `from_pt=True` to" " load this model from those weights." ) + elif has_file(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME, **has_file_kwargs): + raise EnvironmentError( + f"{pretrained_model_name_or_path} does not appear to have a file named" + f" {FLAX_WEIGHTS_INDEX_NAME} but there is a sharded file for PyTorch weights. Use" + " `from_pt=True` to load this model from those weights." + ) else: raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named" @@ -761,7 +779,7 @@ def from_pretrained( model = cls(config, *model_args, _do_init=_do_init, **model_kwargs) if from_pt: - state = load_pytorch_checkpoint_in_flax_state_dict(model, resolved_archive_file) + state = load_pytorch_checkpoint_in_flax_state_dict(model, resolved_archive_file, is_sharded) else: if is_sharded: diff --git a/tests/test_modeling_flax_common.py b/tests/test_modeling_flax_common.py index e22c7e6705b3bd..837f874889ae7d 100644 --- a/tests/test_modeling_flax_common.py +++ b/tests/test_modeling_flax_common.py @@ -1099,6 +1099,14 @@ def test_checkpoint_sharding_local(self): for p1, p2 in zip(flatten_dict(model.params).values(), flatten_dict(new_model.params).values()): self.assertTrue(np.allclose(np.array(p1), np.array(p2))) + @is_pt_flax_cross_test + def test_from_sharded_pt(self): + model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded", from_pt=True) + ref_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-fx-only") + for key, ref_val in flatten_dict(ref_model.params).items(): + val = flatten_dict(model.params)[key] + assert np.allclose(np.array(val), np.array(ref_val)) + def test_gradient_checkpointing(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() From 46d09410eba7892a47d15eb8a7b29b5b7b598a19 Mon Sep 17 00:00:00 2001 From: Ian Castillo <7807897+donelianc@users.noreply.github.com> Date: Fri, 12 Aug 2022 13:11:28 +0200 Subject: [PATCH 086/539] Add type hints for ViLT models (#18577) * Add type hints for Vilt models * Add missing return type for TokenClassification class --- src/transformers/models/vilt/modeling_vilt.py | 158 +++++++++--------- 1 file changed, 79 insertions(+), 79 deletions(-) diff --git a/src/transformers/models/vilt/modeling_vilt.py b/src/transformers/models/vilt/modeling_vilt.py index 308358850c9808..dab78c0bce8687 100755 --- a/src/transformers/models/vilt/modeling_vilt.py +++ b/src/transformers/models/vilt/modeling_vilt.py @@ -17,7 +17,7 @@ import collections.abc import math from dataclasses import dataclass -from typing import List, Optional, Tuple +from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint @@ -761,19 +761,19 @@ class PreTrainedModel @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) def forward( self, - input_ids=None, - attention_mask=None, - token_type_ids=None, - pixel_values=None, - pixel_mask=None, - head_mask=None, - inputs_embeds=None, - image_embeds=None, - image_token_type_idx=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - ): + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + pixel_values: Optional[torch.FloatTensor] = None, + pixel_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + image_embeds: Optional[torch.FloatTensor] = None, + image_token_type_idx: Optional[int] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[BaseModelOutputWithPooling, Tuple[torch.FloatTensor]]: r""" Returns: @@ -914,19 +914,19 @@ def set_output_embeddings(self, new_embeddings): @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, - input_ids=None, - attention_mask=None, - token_type_ids=None, - pixel_values=None, - pixel_mask=None, - head_mask=None, - inputs_embeds=None, - image_embeds=None, - labels=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - ): + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + pixel_values: Optional[torch.FloatTensor] = None, + pixel_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + image_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[MaskedLMOutput, Tuple[torch.FloatTensor]]: r""" labels (*torch.LongTensor* of shape *(batch_size, sequence_length)*, *optional*): Labels for computing the masked language modeling loss. Indices should be in *[-100, 0, ..., @@ -1088,19 +1088,19 @@ def __init__(self, config): @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, - input_ids=None, - attention_mask=None, - token_type_ids=None, - pixel_values=None, - pixel_mask=None, - head_mask=None, - inputs_embeds=None, - image_embeds=None, - labels=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - ): + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + pixel_values: Optional[torch.FloatTensor] = None, + pixel_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + image_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[SequenceClassifierOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.FloatTensor` of shape `(batch_size, num_labels)`, *optional*): Labels for computing the visual question answering loss. This tensor must be either a one-hot encoding of @@ -1193,19 +1193,19 @@ def __init__(self, config): @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, - input_ids=None, - attention_mask=None, - token_type_ids=None, - pixel_values=None, - pixel_mask=None, - head_mask=None, - inputs_embeds=None, - image_embeds=None, - labels=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - ): + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + pixel_values: Optional[torch.FloatTensor] = None, + pixel_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + image_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[SequenceClassifierOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels are currently not supported. @@ -1299,19 +1299,19 @@ def __init__(self, config): @replace_return_docstrings(output_type=ViltForImagesAndTextClassificationOutput, config_class=_CONFIG_FOR_DOC) def forward( self, - input_ids=None, - attention_mask=None, - token_type_ids=None, - pixel_values=None, - pixel_mask=None, - head_mask=None, - inputs_embeds=None, - image_embeds=None, - labels=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - ): + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + pixel_values: Optional[torch.FloatTensor] = None, + pixel_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + image_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[ViltForImagesAndTextClassificationOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Binary classification labels. @@ -1436,19 +1436,19 @@ def __init__(self, config): @replace_return_docstrings(output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, - input_ids=None, - attention_mask=None, - token_type_ids=None, - pixel_values=None, - pixel_mask=None, - head_mask=None, - inputs_embeds=None, - image_embeds=None, - labels=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - ): + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + pixel_values: Optional[torch.FloatTensor] = None, + pixel_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + image_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[TokenClassifierOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size, text_sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. From 3cdaea47ec89da98c805705d30ee5b1e021c54e4 Mon Sep 17 00:00:00 2001 From: "Wang, Yi" Date: Fri, 12 Aug 2022 20:36:27 +0800 Subject: [PATCH 087/539] update doc for perf_train_cpu_many, add intel mpi introduction (#18576) * update doc for perf_train_cpu_many, add mpi introduction Signed-off-by: Wang, Yi A * Update docs/source/en/perf_train_cpu_many.mdx Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update docs/source/en/perf_train_cpu_many.mdx Signed-off-by: Wang, Yi A Signed-off-by: Wang, Yi A Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> --- docs/source/en/perf_train_cpu_many.mdx | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/docs/source/en/perf_train_cpu_many.mdx b/docs/source/en/perf_train_cpu_many.mdx index 5705517f5b1b4a..f4f77965748e3e 100644 --- a/docs/source/en/perf_train_cpu_many.mdx +++ b/docs/source/en/perf_train_cpu_many.mdx @@ -36,8 +36,22 @@ pip install oneccl_bind_pt=={pytorch_version} -f https://software.intel.com/ipex ``` where `{pytorch_version}` should be your PyTorch version, for instance 1.12.0. Check more approaches for [oneccl_bind_pt installation](https://github.com/intel/torch-ccl). +Versions of oneCCL and PyTorch must match. -### Usage in Trainer +## Intel® MPI library +Use this standards-based MPI implementation to deliver flexible, efficient, scalable cluster messaging on Intel® architecture. This component is part of the Intel® oneAPI HPC Toolkit. +It can be installed via [MPI](https://www.intel.com/content/www/us/en/developer/articles/tool/oneapi-standalone-components.html#mpi). + +Please set the environment by following command before using it. + +``` +source /opt/intel/oneapi/setvars.sh +``` + +The following "Usage in Trainer" takes mpirun in Intel® MPI library as an example. + + +## Usage in Trainer To enable multi CPU distributed training in the Trainer with the ccl backend, users should add **`--xpu_backend ccl`** in the command arguments. Let's see an example with the [question-answering example](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering) From d344534bf67b79a0760d873b0b9bf6f41c92dcba Mon Sep 17 00:00:00 2001 From: Stas Bekman Date: Fri, 12 Aug 2022 05:40:53 -0700 Subject: [PATCH 088/539] typos (#18594) --- src/transformers/generation_utils.py | 2 +- src/transformers/models/fsmt/modeling_fsmt.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/transformers/generation_utils.py b/src/transformers/generation_utils.py index 8f6dc6a383a774..637b723c88de20 100644 --- a/src/transformers/generation_utils.py +++ b/src/transformers/generation_utils.py @@ -1200,7 +1200,7 @@ def generate( input_ids_seq_length = input_ids.shape[-1] if max_length is None and max_new_tokens is None: warnings.warn( - "Neither `max_length` nor `max_new_tokens` have been set, `max_length` will default to " + "Neither `max_length` nor `max_new_tokens` has been set, `max_length` will default to " f"{self.config.max_length} (`self.config.max_length`). Controlling `max_length` via the config is " "deprecated and `max_length` will be removed from the config in v5 of Transformers -- we recommend " "using `max_new_tokens` to control the maximum length of the generation.", diff --git a/src/transformers/models/fsmt/modeling_fsmt.py b/src/transformers/models/fsmt/modeling_fsmt.py index d44bc80363d09e..f469266d7454f6 100644 --- a/src/transformers/models/fsmt/modeling_fsmt.py +++ b/src/transformers/models/fsmt/modeling_fsmt.py @@ -220,7 +220,7 @@ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. - IIndices can be obtained using [`FSTMTokenizer`]. See [`PreTrainedTokenizer.encode`] and + Indices can be obtained using [`FSTMTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) From 4eed2beca0fd8058a1c51684f68599522adf20c9 Mon Sep 17 00:00:00 2001 From: Sourab Mangrulkar <13534540+pacman100@users.noreply.github.com> Date: Fri, 12 Aug 2022 18:18:37 +0530 Subject: [PATCH 089/539] FSDP bug fix for `load_state_dict` (#18596) --- src/transformers/trainer.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index e537b3b6357adb..64d5a3fadf4d6d 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -1935,7 +1935,9 @@ def _load_from_checkpoint(self, resume_from_checkpoint, model=None): else: # We load the model state dict on the CPU to avoid an OOM error. state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location="cpu") - load_result = model.load_state_dict(state_dict, strict=False) + # workaround for FSDP bug https://github.com/pytorch/pytorch/issues/82963 + # which takes *args instead of **kwargs + load_result = model.load_state_dict(state_dict, False) # release memory del state_dict self._issue_warnings_after_load(load_result) @@ -1989,7 +1991,9 @@ def _load_best_model(self): # We load the model state dict on the CPU to avoid an OOM error. state_dict = torch.load(best_model_path, map_location="cpu") # If the model is on the GPU, it still works! - load_result = model.load_state_dict(state_dict, strict=False) + # workaround for FSDP bug https://github.com/pytorch/pytorch/issues/82963 + # which takes *args instead of **kwargs + load_result = model.load_state_dict(state_dict, False) if not is_sagemaker_mp_enabled(): self._issue_warnings_after_load(load_result) elif os.path.exists(os.path.join(self.state.best_model_checkpoint, WEIGHTS_INDEX_NAME)): From 2156619f10b33fb58520830bff0a92c36d13128e Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Fri, 12 Aug 2022 15:10:00 +0200 Subject: [PATCH 090/539] Add `TFAutoModelForSemanticSegmentation` to the main `__init__.py` (#18600) Co-authored-by: ydshieh --- docs/source/en/model_doc/auto.mdx | 4 ++++ src/transformers/__init__.py | 2 ++ src/transformers/models/auto/__init__.py | 2 ++ src/transformers/utils/dummy_tf_objects.py | 7 +++++++ 4 files changed, 15 insertions(+) diff --git a/docs/source/en/model_doc/auto.mdx b/docs/source/en/model_doc/auto.mdx index 67fc81d280a79b..995296485b9402 100644 --- a/docs/source/en/model_doc/auto.mdx +++ b/docs/source/en/model_doc/auto.mdx @@ -186,6 +186,10 @@ Likewise, if your `NewModel` is a subclass of [`PreTrainedModel`], make sure its [[autodoc]] TFAutoModelForImageClassification +## TFAutoModelForSemanticSegmentation + +[[autodoc]] TFAutoModelForSemanticSegmentation + ## TFAutoModelForMaskedLM [[autodoc]] TFAutoModelForMaskedLM diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index be2be2727f0146..2f53db07f078f0 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -2105,6 +2105,7 @@ "TFAutoModelForNextSentencePrediction", "TFAutoModelForPreTraining", "TFAutoModelForQuestionAnswering", + "TFAutoModelForSemanticSegmentation", "TFAutoModelForSeq2SeqLM", "TFAutoModelForSequenceClassification", "TFAutoModelForSpeechSeq2Seq", @@ -4599,6 +4600,7 @@ TFAutoModelForNextSentencePrediction, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, + TFAutoModelForSemanticSegmentation, TFAutoModelForSeq2SeqLM, TFAutoModelForSequenceClassification, TFAutoModelForSpeechSeq2Seq, diff --git a/src/transformers/models/auto/__init__.py b/src/transformers/models/auto/__init__.py index 139d4feda336e0..ec253f6037a3d3 100644 --- a/src/transformers/models/auto/__init__.py +++ b/src/transformers/models/auto/__init__.py @@ -128,6 +128,7 @@ "TFAutoModelForNextSentencePrediction", "TFAutoModelForPreTraining", "TFAutoModelForQuestionAnswering", + "TFAutoModelForSemanticSegmentation", "TFAutoModelForSeq2SeqLM", "TFAutoModelForSequenceClassification", "TFAutoModelForSpeechSeq2Seq", @@ -271,6 +272,7 @@ TFAutoModelForNextSentencePrediction, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, + TFAutoModelForSemanticSegmentation, TFAutoModelForSeq2SeqLM, TFAutoModelForSequenceClassification, TFAutoModelForSpeechSeq2Seq, diff --git a/src/transformers/utils/dummy_tf_objects.py b/src/transformers/utils/dummy_tf_objects.py index 6df601ca646af3..fec5ffe700808a 100644 --- a/src/transformers/utils/dummy_tf_objects.py +++ b/src/transformers/utils/dummy_tf_objects.py @@ -362,6 +362,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) +class TFAutoModelForSemanticSegmentation(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + class TFAutoModelForSeq2SeqLM(metaclass=DummyObject): _backends = ["tf"] From ed1924e801c4d7e484703dc4624c185b8da5d5a2 Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Fri, 12 Aug 2022 14:53:51 +0100 Subject: [PATCH 091/539] Generate: validate `model_kwargs` (and catch typos in generate arguments) (#18261) * validate generate model_kwargs * generate tests -- not all models have an attn mask --- src/transformers/generation_utils.py | 26 +++++ tests/generation/test_generation_utils.py | 113 +++++++++++++--------- 2 files changed, 91 insertions(+), 48 deletions(-) diff --git a/src/transformers/generation_utils.py b/src/transformers/generation_utils.py index 637b723c88de20..b5b042e718c1c3 100644 --- a/src/transformers/generation_utils.py +++ b/src/transformers/generation_utils.py @@ -841,6 +841,29 @@ def compute_transition_beam_scores( return transition_scores + def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]): + """Validates model kwargs for generation. Generate argument typos will also be caught here.""" + # Excludes arguments that are handled before calling any model function + if self.config.is_encoder_decoder: + for key in ["decoder_input_ids"]: + model_kwargs.pop(key, None) + + unused_model_args = [] + model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters) + # `kwargs` if often used to handle optional forward pass inputs like `attention_mask`. If + # `prepare_inputs_for_generation` doesn't accept `kwargs`, then a stricter check can be made ;) + if "kwargs" in model_args: + model_args |= set(inspect.signature(self.forward).parameters) + for key, value in model_kwargs.items(): + if value is not None and key not in model_args: + unused_model_args.append(key) + + if unused_model_args: + raise ValueError( + f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the" + " generate arguments will also show up in this list)" + ) + @torch.no_grad() def generate( self, @@ -1120,6 +1143,9 @@ def generate( >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Paris ist eines der dichtesten besiedelten Gebiete Europas.'] ```""" + # 0. Validate model kwargs + self._validate_model_kwargs(model_kwargs.copy()) + # 1. Set generation parameters if not already defined bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id num_beams = num_beams if num_beams is not None else self.config.num_beams diff --git a/tests/generation/test_generation_utils.py b/tests/generation/test_generation_utils.py index 56227403ae60b9..ba13669368d228 100644 --- a/tests/generation/test_generation_utils.py +++ b/tests/generation/test_generation_utils.py @@ -75,21 +75,25 @@ class GenerationTesterMixin: def _get_input_ids_and_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - input_ids = inputs_dict[self.input_name] - attention_mask = torch.ones_like(input_ids, dtype=torch.long) # cut to half length & take max batch_size 3 max_batch_size = 2 sequence_length = input_ids.shape[-1] // 2 input_ids = input_ids[:max_batch_size, :sequence_length] - attention_mask = attention_mask[:max_batch_size, :sequence_length] # generate max 3 tokens max_length = input_ids.shape[-1] + 3 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` config.pad_token_id = config.eos_token_id + + # TransfoXL has no attention mask + if "transfoxl" in config.__class__.__name__.lower(): + attention_mask = None + else: + attention_mask = torch.ones_like(input_ids, dtype=torch.long)[:max_batch_size, :sequence_length] + return config, input_ids, attention_mask, max_length @staticmethod @@ -252,10 +256,9 @@ def _greedy_generate( ) kwargs = {} - + model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, - attention_mask=attention_mask, do_sample=False, num_beams=1, max_length=max_length, @@ -265,6 +268,7 @@ def _greedy_generate( return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **logits_process_kwargs, + **model_kwargs, ) if model.config.is_encoder_decoder: @@ -278,16 +282,17 @@ def _greedy_generate( kwargs["encoder_outputs"] = encoder_outputs with torch.no_grad(): + model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_greedy = model.greedy_search( input_ids, max_length=max_length, - attention_mask=attention_mask, logits_processor=logits_processor, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, **kwargs, + **model_kwargs, ) return output_greedy, output_generate @@ -308,13 +313,13 @@ def _sample_generate( return_dict_in_generate=False, ): torch.manual_seed(0) + model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=True, num_beams=1, max_length=max_length, num_return_sequences=num_return_sequences, - attention_mask=attention_mask, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, @@ -327,7 +332,7 @@ def _sample_generate( torch.manual_seed(0) kwargs = {} if model.config.is_encoder_decoder: - encoder_outputs, input_ids_clone, attention_mask_clone = self._get_encoder_outputs( + encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, @@ -336,18 +341,16 @@ def _sample_generate( output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs - input_ids_clone = input_ids_clone.repeat_interleave(num_return_sequences, dim=0) - else: - attention_mask_clone = attention_mask.repeat_interleave(num_return_sequences, dim=0) - input_ids_clone = input_ids.repeat_interleave(num_return_sequences, dim=0) + elif attention_mask is not None: + attention_mask = attention_mask.repeat_interleave(num_return_sequences, dim=0) # prevent flaky generation test failures logits_processor.append(InfNanRemoveLogitsProcessor()) with torch.no_grad(): + model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_sample = model.sample( - input_ids_clone, - attention_mask=attention_mask_clone, + input_ids.repeat_interleave(num_return_sequences, dim=0), max_length=max_length, logits_processor=logits_processor, logits_warper=logits_warper, @@ -356,6 +359,7 @@ def _sample_generate( output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, + **model_kwargs, ) return output_sample, output_generate @@ -374,9 +378,9 @@ def _beam_search_generate( output_hidden_states=False, return_dict_in_generate=False, ): + model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, - attention_mask=attention_mask, do_sample=False, max_length=max_length, output_scores=output_scores, @@ -386,12 +390,13 @@ def _beam_search_generate( remove_invalid_values=True, **beam_kwargs, **logits_process_kwargs, + **model_kwargs, ) # beam_search does not automatically interleave `batch_size` dim for `num_beams` kwargs = {} if model.config.is_encoder_decoder: - encoder_outputs, input_ids_clone, attention_mask_clone = self._get_encoder_outputs( + encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, @@ -400,23 +405,22 @@ def _beam_search_generate( output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs - input_ids_clone = input_ids_clone.repeat_interleave(beam_scorer.num_beams, dim=0) - else: - attention_mask_clone = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0) - input_ids_clone = input_ids.repeat_interleave(beam_scorer.num_beams, dim=0) + elif attention_mask is not None: + attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0) with torch.no_grad(): + model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_beam_search = model.beam_search( - input_ids_clone, + input_ids.repeat_interleave(beam_scorer.num_beams, dim=0), beam_scorer, max_length=max_length, - attention_mask=attention_mask_clone, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, + **model_kwargs, ) return output_generate, output_beam_search @@ -437,9 +441,9 @@ def _beam_sample_generate( return_dict_in_generate=False, ): torch.manual_seed(0) + model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, - attention_mask=attention_mask, do_sample=True, max_length=max_length, output_scores=output_scores, @@ -449,6 +453,7 @@ def _beam_sample_generate( remove_invalid_values=True, **beam_kwargs, **logits_warper_kwargs, + **model_kwargs, ) # beam_search does not automatically interleave `batch_size` dim for `num_beams * num_return_sequences` kwargs = {} @@ -462,7 +467,7 @@ def _beam_sample_generate( output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs - else: + elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams * num_return_sequences, dim=0) # prevent flaky generation test failures @@ -471,11 +476,11 @@ def _beam_sample_generate( torch.manual_seed(0) with torch.no_grad(): + model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_beam_sample = model.beam_sample( input_ids.repeat_interleave(beam_scorer.num_beams * num_return_sequences, dim=0), beam_scorer, max_length=max_length, - attention_mask=attention_mask, logits_warper=logits_warper, logits_processor=logits_processor, output_scores=output_scores, @@ -483,6 +488,7 @@ def _beam_sample_generate( output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, + **model_kwargs, ) return output_generate, output_beam_sample @@ -502,9 +508,9 @@ def _group_beam_search_generate( output_hidden_states=False, return_dict_in_generate=False, ): + model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, - attention_mask=attention_mask, do_sample=False, max_length=max_length, output_scores=output_scores, @@ -514,12 +520,13 @@ def _group_beam_search_generate( remove_invalid_values=True, **beam_kwargs, **logits_process_kwargs, + **model_kwargs, ) # group_beam_search does not automatically interleave `batch_size` dim for `num_beams` kwargs = {} if model.config.is_encoder_decoder: - encoder_outputs, input_ids_clone, attention_mask_clone = self._get_encoder_outputs( + encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, @@ -528,23 +535,22 @@ def _group_beam_search_generate( output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs - input_ids_clone = input_ids_clone.repeat_interleave(beam_scorer.num_beams, dim=0) - else: - attention_mask_clone = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0) - input_ids_clone = input_ids.repeat_interleave(beam_scorer.num_beams, dim=0) + elif attention_mask is not None: + attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0) with torch.no_grad(): + model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_group_beam_search = model.group_beam_search( - input_ids_clone, + input_ids.repeat_interleave(beam_scorer.num_beams, dim=0), beam_scorer, max_length=max_length, - attention_mask=attention_mask_clone, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, + **model_kwargs, ) return output_generate, output_group_beam_search @@ -564,9 +570,9 @@ def _constrained_beam_search_generate( output_hidden_states=False, return_dict_in_generate=False, ): + model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, - attention_mask=attention_mask, do_sample=False, max_length=max_length, output_scores=output_scores, @@ -577,12 +583,13 @@ def _constrained_beam_search_generate( constraints=constraints, **beam_kwargs, **logits_process_kwargs, + **model_kwargs, ) # group_beam_search does not automatically interleave `batch_size` dim for `num_beams` kwargs = {} if model.config.is_encoder_decoder: - encoder_outputs, input_ids_clone, attention_mask_clone = self._get_encoder_outputs( + encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, @@ -591,23 +598,22 @@ def _constrained_beam_search_generate( output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs - input_ids_clone = input_ids_clone.repeat_interleave(constrained_beam_scorer.num_beams, dim=0) - else: - attention_mask_clone = attention_mask.repeat_interleave(constrained_beam_scorer.num_beams, dim=0) - input_ids_clone = input_ids.repeat_interleave(constrained_beam_scorer.num_beams, dim=0) + elif attention_mask is not None: + attention_mask = attention_mask.repeat_interleave(constrained_beam_scorer.num_beams, dim=0) with torch.no_grad(): + model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_group_beam_search = model.constrained_beam_search( - input_ids_clone, + input_ids.repeat_interleave(constrained_beam_scorer.num_beams, dim=0), constrained_beam_scorer, max_length=max_length, - attention_mask=attention_mask_clone, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, + **model_kwargs, ) return output_generate, output_group_beam_search @@ -1044,12 +1050,7 @@ def test_generate_without_input_ids(self): model = model_class(config).to(torch_device) model.eval() - output_ids_generate = model.generate( - do_sample=False, - max_length=max_length, - remove_invalid_values=True, - ) - + output_ids_generate = model.generate(do_sample=False, max_length=max_length, remove_invalid_values=True) self.assertIsNotNone(output_ids_generate) def test_group_beam_search_generate(self): @@ -2052,7 +2053,7 @@ def test_max_new_tokens_decoder_only(self): # max_new_tokens and max_length serve the same purpose and must not be used together. with self.assertRaises(ValueError): - gpt2_model.generate(decoder_input_ids=input_ids, max_new_tokens=10, max_length=20) + gpt2_model.generate(input_ids=input_ids, max_new_tokens=10, max_length=20) def test_encoder_decoder_generate_with_inputs_embeds(self): article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" @@ -2699,3 +2700,19 @@ def test_constrained_beam_search_mixin_type_checks(self): with self.assertRaises(ValueError): model.generate(input_ids, force_words_ids=[[[-1]]]) + + def test_validate_generation_inputs(self): + tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/t5-tiny-random") + model = AutoModelForSeq2SeqLM.from_pretrained("patrickvonplaten/t5-tiny-random") + + encoder_input_str = "Hello world" + input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids + + # typos are quickly detected (the correct argument is `do_sample`) + with self.assertRaisesRegex(ValueError, "do_samples"): + model.generate(input_ids, do_samples=True) + + # arbitrary arguments that will not be used anywhere are also not accepted + with self.assertRaisesRegex(ValueError, "foo"): + fake_model_kwargs = {"foo": "bar"} + model.generate(input_ids, **fake_model_kwargs) From a5ca56ff158075351149220319c14dde555a86f5 Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Fri, 12 Aug 2022 16:15:09 +0200 Subject: [PATCH 092/539] Supporting seq2seq models for `bitsandbytes` integration (#18579) * Supporting seq2seq models for `bitsandbytes` integration - `bitsandbytes` integration supports now seq2seq models - check if a model has tied weights as an additional check * small modification - tie the weights before looking at tied weights! --- src/transformers/utils/bitsandbytes.py | 14 +++++++++++++- tests/mixed_int8/test_mixed_int8.py | 22 ++++++++++++++++++++-- 2 files changed, 33 insertions(+), 3 deletions(-) diff --git a/src/transformers/utils/bitsandbytes.py b/src/transformers/utils/bitsandbytes.py index ee4e52d421fd09..eca605b2edef94 100644 --- a/src/transformers/utils/bitsandbytes.py +++ b/src/transformers/utils/bitsandbytes.py @@ -1,3 +1,5 @@ +from copy import deepcopy + from transformers.utils import is_accelerate_available, is_bitsandbytes_available @@ -9,6 +11,7 @@ if is_accelerate_available(): from accelerate import init_empty_weights + from accelerate.utils import find_tied_parameters def set_module_8bit_tensor_to_device(module, tensor_name, device, value=None): @@ -132,8 +135,17 @@ def get_key_to_not_convert(model): model (`torch.nn.Module`): Input model """ + # Create a copy of the model and tie the weights, then + # check if it contains tied weights + tied_model = deepcopy(model) # this has 0 cost since it is done inside `init_empty_weights` context manager` + tied_model.tie_weights() + has_tied_params = len(find_tied_parameters(tied_model)) > 0 + + # Check if it is a base model + is_base_model = not hasattr(model, model.base_model_prefix) + # Ignore this for base models (BertModel, GPT2Model, etc.) - if not hasattr(model, model.base_model_prefix): + if (not has_tied_params) and is_base_model: return "" # otherwise they have an attached head diff --git a/tests/mixed_int8/test_mixed_int8.py b/tests/mixed_int8/test_mixed_int8.py index 0cd7ca16411c19..2911d67748809a 100644 --- a/tests/mixed_int8/test_mixed_int8.py +++ b/tests/mixed_int8/test_mixed_int8.py @@ -15,7 +15,14 @@ import gc import unittest -from transformers import AutoModel, AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer, pipeline +from transformers import ( + AutoModel, + AutoModelForCausalLM, + AutoModelForSeq2SeqLM, + AutoModelForSequenceClassification, + AutoTokenizer, + pipeline, +) from transformers.testing_utils import ( is_torch_available, require_accelerate, @@ -106,12 +113,21 @@ def setUp(self): super().setUp() # model_name self.model_name = "bigscience/bloom-560m" - # Models and tokenizer + self.seq_to_seq_name = "t5-small" + + # Different types of model + self.base_model = AutoModel.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") + # Sequence classification model self.sequence_model = AutoModelForSequenceClassification.from_pretrained( self.model_name, load_in_8bit=True, device_map="auto" ) + # CausalLM model self.model_8bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") + # Seq2seq model + self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained( + self.seq_to_seq_name, load_in_8bit=True, device_map="auto" + ) def tearDown(self): r""" @@ -121,6 +137,7 @@ def tearDown(self): del self.base_model del self.sequence_model del self.model_8bit + del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() @@ -138,6 +155,7 @@ def test_correct_head_class(self): # Other heads should be nn.Parameter self.assertTrue(self.model_8bit.lm_head.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter) + self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter) class MixedInt8TestPipeline(BaseMixedInt8Test): From 2ab790e82d0759b667cd848a4d49e6ad65e15d59 Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Fri, 12 Aug 2022 16:40:58 +0200 Subject: [PATCH 093/539] Add Donut (#18488) * First draft * Improve script * Update script * Make conversion work * Add final_layer_norm attribute to Swin's config * Add DonutProcessor * Convert more models * Improve feature extractor and convert base models * Fix bug * Improve integration tests * Improve integration tests and add model to README * Add doc test * Add feature extractor to docs * Fix integration tests * Remove register_buffer * Fix toctree and add missing attribute * Add DonutSwin * Make conversion script work * Improve conversion script * Address comment * Fix bug * Fix another bug * Remove deprecated method from docs * Make Swin and Swinv2 untouched * Fix code examples * Fix processor * Update model_type to donut-swin * Add feature extractor tests, add token2json method, improve feature extractor * Fix failing tests, remove integration test * Add do_thumbnail for consistency * Improve code examples * Add code example for document parsing * Add DonutSwin to MODEL_NAMES_MAPPING * Add model to appropriate place in toctree * Update namespace to appropriate organization Co-authored-by: Niels Rogge --- README.md | 1 + README_ko.md | 1 + README_zh-hans.md | 1 + README_zh-hant.md | 1 + docs/source/en/_toctree.yml | 2 + docs/source/en/index.mdx | 2 + docs/source/en/model_doc/donut.mdx | 214 ++++ src/transformers/__init__.py | 12 + src/transformers/image_utils.py | 22 + src/transformers/models/__init__.py | 1 + .../models/auto/configuration_auto.py | 5 + .../models/auto/feature_extraction_auto.py | 1 + src/transformers/models/auto/modeling_auto.py | 1 + .../models/auto/processing_auto.py | 1 + src/transformers/models/donut/__init__.py | 76 ++ .../models/donut/configuration_donut_swin.py | 140 +++ .../models/donut/convert_donut_to_pytorch.py | 234 +++++ .../models/donut/feature_extraction_donut.py | 208 ++++ .../models/donut/modeling_donut_swin.py | 941 ++++++++++++++++++ .../models/donut/processing_donut.py | 156 +++ .../convert_trocr_unilm_to_pytorch.py | 0 src/transformers/utils/dummy_pt_objects.py | 17 + .../utils/dummy_vision_objects.py | 7 + src/transformers/utils/fx.py | 1 + tests/models/donut/__init__.py | 0 .../donut/test_feature_extraction_donut.py | 203 ++++ .../models/donut/test_modeling_donut_swin.py | 464 +++++++++ .../test_modeling_vision_encoder_decoder.py | 216 +++- utils/check_copies.py | 1 + utils/check_repo.py | 1 + utils/documentation_tests.txt | 1 + 31 files changed, 2924 insertions(+), 7 deletions(-) create mode 100644 docs/source/en/model_doc/donut.mdx create mode 100644 src/transformers/models/donut/__init__.py create mode 100644 src/transformers/models/donut/configuration_donut_swin.py create mode 100644 src/transformers/models/donut/convert_donut_to_pytorch.py create mode 100644 src/transformers/models/donut/feature_extraction_donut.py create mode 100644 src/transformers/models/donut/modeling_donut_swin.py create mode 100644 src/transformers/models/donut/processing_donut.py rename src/transformers/models/{vision_encoder_decoder => trocr}/convert_trocr_unilm_to_pytorch.py (100%) create mode 100644 tests/models/donut/__init__.py create mode 100644 tests/models/donut/test_feature_extraction_donut.py create mode 100644 tests/models/donut/test_modeling_donut_swin.py diff --git a/README.md b/README.md index 46a4b07c14cd32..30bc6d870bbf01 100644 --- a/README.md +++ b/README.md @@ -286,6 +286,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan. 1. **[DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation) and a German version of DistilBERT. 1. **[DiT](https://huggingface.co/docs/transformers/model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei. +1. **[Donut](https://huggingface.co/docs/transformers/main/model_doc/donut)** (from NAVER), released together with the paper [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) by Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park. 1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun. 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. diff --git a/README_ko.md b/README_ko.md index c63fdca749da8f..cc0b790ad76a8d 100644 --- a/README_ko.md +++ b/README_ko.md @@ -242,6 +242,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan. 1. **[DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German version of DistilBERT. 1. **[DiT](https://huggingface.co/docs/transformers/model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei. +1. **[Donut](https://huggingface.co/docs/transformers/main/model_doc/donut)** (from NAVER) released with the paper [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) by Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park. 1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun. 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. diff --git a/README_zh-hans.md b/README_zh-hans.md index 0ab06bd96ad99f..fe2fa45f71f39f 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -266,6 +266,7 @@ conda install -c huggingface transformers 1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (来自 Microsoft Research) 伴随论文 [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) 由 Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan 发布。 1. **[DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert)** (来自 HuggingFace), 伴随论文 [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 同样的方法也应用于压缩 GPT-2 到 [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa 到 [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation), Multilingual BERT 到 [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) 和德语版 DistilBERT。 1. **[DiT](https://huggingface.co/docs/transformers/model_doc/dit)** (来自 Microsoft Research) 伴随论文 [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) 由 Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei 发布。 +1. **[Donut](https://huggingface.co/docs/transformers/main/model_doc/donut)** (来自 NAVER) 伴随论文 [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) 由 Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park 发布。 1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (来自 Facebook) 伴随论文 [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) 由 Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih 发布。 1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (来自 Intel Labs) 伴随论文 [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) 由 René Ranftl, Alexey Bochkovskiy, Vladlen Koltun 发布。 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (来自 Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index 90f29ad031b8b0..4f5a9954761494 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -278,6 +278,7 @@ conda install -c huggingface transformers 1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan. 1. **[DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German version of DistilBERT. 1. **[DiT](https://huggingface.co/docs/transformers/model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei. +1. **[Donut](https://huggingface.co/docs/transformers/main/model_doc/donut)** (from NAVER) released with the paper [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) by Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park. 1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun. 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 32ab4c6361d3a7..78137d2c8a74c1 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -427,6 +427,8 @@ title: CLIP - local: model_doc/data2vec title: Data2Vec + - local: model_doc/donut + title: Donut - local: model_doc/flava title: FLAVA - local: model_doc/groupvit diff --git a/docs/source/en/index.mdx b/docs/source/en/index.mdx index 5c0d51d8b7afb2..257eba8171ed1c 100644 --- a/docs/source/en/index.mdx +++ b/docs/source/en/index.mdx @@ -84,6 +84,7 @@ The library currently contains JAX, PyTorch and TensorFlow implementations, pret 1. **[DialoGPT](model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan. 1. **[DistilBERT](model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation) and a German version of DistilBERT. 1. **[DiT](model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei. +1. **[Donut](model_doc/donut)** (from NAVER), released together with the paper [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) by Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park. 1. **[DPR](model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 1. **[DPT](master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun. 1. **[ELECTRA](model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. @@ -224,6 +225,7 @@ Flax), PyTorch, and/or TensorFlow. | DeiT | ❌ | ❌ | ✅ | ✅ | ❌ | | DETR | ❌ | ❌ | ✅ | ❌ | ❌ | | DistilBERT | ✅ | ✅ | ✅ | ✅ | ✅ | +| DonutSwin | ❌ | ❌ | ✅ | ❌ | ❌ | | DPR | ✅ | ✅ | ✅ | ✅ | ❌ | | DPT | ❌ | ❌ | ✅ | ❌ | ❌ | | ELECTRA | ✅ | ✅ | ✅ | ✅ | ✅ | diff --git a/docs/source/en/model_doc/donut.mdx b/docs/source/en/model_doc/donut.mdx new file mode 100644 index 00000000000000..9c9973be022e7c --- /dev/null +++ b/docs/source/en/model_doc/donut.mdx @@ -0,0 +1,214 @@ + + +# Donut + +## Overview + +The Donut model was proposed in [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) by +Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park. +Donut consists of an image Transformer encoder and an autoregressive text Transformer decoder to perform document understanding +tasks such as document image classification, form understanding and visual question answering. + +The abstract from the paper is the following: + +*Understanding document images (e.g., invoices) is a core but challenging task since it requires complex functions such as reading text and a holistic understanding of the document. Current Visual Document Understanding (VDU) methods outsource the task of reading text to off-the-shelf Optical Character Recognition (OCR) engines and focus on the understanding task with the OCR outputs. Although such OCR-based approaches have shown promising performance, they suffer from 1) high computational costs for using OCR; 2) inflexibility of OCR models on languages or types of document; 3) OCR error propagation to the subsequent process. To address these issues, in this paper, we introduce a novel OCR-free VDU model named Donut, which stands for Document understanding transformer. As the first step in OCR-free VDU research, we propose a simple architecture (i.e., Transformer) with a pre-training objective (i.e., cross-entropy loss). Donut is conceptually simple yet effective. Through extensive experiments and analyses, we show a simple OCR-free VDU model, Donut, achieves state-of-the-art performances on various VDU tasks in terms of both speed and accuracy. In addition, we offer a synthetic data generator that helps the model pre-training to be flexible in various languages and domains.* + + + + Donut high-level overview. Taken from the original paper. + +This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found +[here](https://github.com/clovaai/donut). + +Tips: + +- The quickest way to get started with Donut is by checking the [tutorial + notebooks](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/donut), which show how to use the model + at inference time as well as fine-tuning on custom data. +- Donut is always used within the [VisionEncoderDecoder](vision-encoder-decoder) framework. + +## Inference + +Donut's [`VisionEncoderDecoder`] model accepts images as input and makes use of +[`~generation_utils.GenerationMixin.generate`] to autoregressively generate text given the input image. + +The [`DonutFeatureExtractor`] class is responsible for preprocessing the input image and +[`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`] decodes the generated target tokens to the target string. The +[`DonutProcessor`] wraps [`DonutFeatureExtractor`] and [`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`] +into a single instance to both extract the input features and decode the predicted token ids. + +- Step-by-step Document Image Classification + +```py +>>> import re + +>>> from transformers import DonutProcessor, VisionEncoderDecoderModel +>>> from datasets import load_dataset +>>> import torch + +>>> processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-rvlcdip") +>>> model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-finetuned-rvlcdip") + +>>> device = "cuda" if torch.cuda.is_available() else "cpu" +>>> model.to(device) # doctest: +IGNORE_RESULT + +>>> # load document image +>>> dataset = load_dataset("hf-internal-testing/example-documents", split="test") +>>> image = dataset[1]["image"] + +>>> # prepare decoder inputs +>>> task_prompt = "" +>>> decoder_input_ids = processor.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt").input_ids + +>>> pixel_values = processor(image, return_tensors="pt").pixel_values + +>>> outputs = model.generate( +... pixel_values.to(device), +... decoder_input_ids=decoder_input_ids.to(device), +... max_length=model.decoder.config.max_position_embeddings, +... early_stopping=True, +... pad_token_id=processor.tokenizer.pad_token_id, +... eos_token_id=processor.tokenizer.eos_token_id, +... use_cache=True, +... num_beams=1, +... bad_words_ids=[[processor.tokenizer.unk_token_id]], +... return_dict_in_generate=True, +... ) + +>>> sequence = processor.batch_decode(outputs.sequences)[0] +>>> sequence = sequence.replace(processor.tokenizer.eos_token, "").replace(processor.tokenizer.pad_token, "") +>>> sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token +>>> print(processor.token2json(sequence)) +{'class': 'advertisement'} +``` + +- Step-by-step Document Parsing + +```py +>>> import re + +>>> from transformers import DonutProcessor, VisionEncoderDecoderModel +>>> from datasets import load_dataset +>>> import torch + +>>> processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-cord-v2") +>>> model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-finetuned-cord-v2") + +>>> device = "cuda" if torch.cuda.is_available() else "cpu" +>>> model.to(device) # doctest: +IGNORE_RESULT + +>>> # load document image +>>> dataset = load_dataset("hf-internal-testing/example-documents", split="test") +>>> image = dataset[2]["image"] + +>>> # prepare decoder inputs +>>> task_prompt = "" +>>> decoder_input_ids = processor.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt").input_ids + +>>> pixel_values = processor(image, return_tensors="pt").pixel_values + +>>> outputs = model.generate( +... pixel_values.to(device), +... decoder_input_ids=decoder_input_ids.to(device), +... max_length=model.decoder.config.max_position_embeddings, +... early_stopping=True, +... pad_token_id=processor.tokenizer.pad_token_id, +... eos_token_id=processor.tokenizer.eos_token_id, +... use_cache=True, +... num_beams=1, +... bad_words_ids=[[processor.tokenizer.unk_token_id]], +... return_dict_in_generate=True, +... ) + +>>> sequence = processor.batch_decode(outputs.sequences)[0] +>>> sequence = sequence.replace(processor.tokenizer.eos_token, "").replace(processor.tokenizer.pad_token, "") +>>> sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token +>>> print(processor.token2json(sequence)) +{'menu': {'nm': 'CINNAMON SUGAR', 'unitprice': '17,000', 'cnt': '1 x', 'price': '17,000'}, 'sub_total': {'subtotal_price': '17,000'}, 'total': {'total_price': '17,000', 'cashprice': '20,000', 'changeprice': '3,000'}} +``` + +- Step-by-step Document Visual Question Answering (DocVQA) + +```py +>>> import re + +>>> from transformers import DonutProcessor, VisionEncoderDecoderModel +>>> from datasets import load_dataset +>>> import torch + +>>> processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa") +>>> model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa") + +>>> device = "cuda" if torch.cuda.is_available() else "cpu" +>>> model.to(device) # doctest: +IGNORE_RESULT + +>>> # load document image from the DocVQA dataset +>>> dataset = load_dataset("hf-internal-testing/example-documents", split="test") +>>> image = dataset[0]["image"] + +>>> # prepare decoder inputs +>>> task_prompt = "{user_input}" +>>> question = "When is the coffee break?" +>>> prompt = task_prompt.replace("{user_input}", question) +>>> decoder_input_ids = processor.tokenizer(prompt, add_special_tokens=False, return_tensors="pt").input_ids + +>>> pixel_values = processor(image, return_tensors="pt").pixel_values + +>>> outputs = model.generate( +... pixel_values.to(device), +... decoder_input_ids=decoder_input_ids.to(device), +... max_length=model.decoder.config.max_position_embeddings, +... early_stopping=True, +... pad_token_id=processor.tokenizer.pad_token_id, +... eos_token_id=processor.tokenizer.eos_token_id, +... use_cache=True, +... num_beams=1, +... bad_words_ids=[[processor.tokenizer.unk_token_id]], +... return_dict_in_generate=True, +... ) + +>>> sequence = processor.batch_decode(outputs.sequences)[0] +>>> sequence = sequence.replace(processor.tokenizer.eos_token, "").replace(processor.tokenizer.pad_token, "") +>>> sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token +>>> print(processor.token2json(sequence)) +{'question': 'When is the coffee break?', 'answer': '11-14 to 11:39 a.m.'} +``` + +See the [model hub](https://huggingface.co/models?filter=donut) to look for Donut checkpoints. + +## Training + +We refer to the [tutorial notebooks](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/donut). + +## DonutSwinConfig + +[[autodoc]] DonutSwinConfig + +## DonutFeatureExtractor + +[[autodoc]] DonutFeatureExtractor + - __call__ + +## DonutProcessor + +[[autodoc]] DonutProcessor + - __call__ + - from_pretrained + - save_pretrained + - batch_decode + - decode + +## DonutSwinModel + +[[autodoc]] DonutSwinModel + - forward \ No newline at end of file diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 2f53db07f078f0..d6444e0844ff54 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -190,6 +190,7 @@ "models.dialogpt": [], "models.distilbert": ["DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DistilBertConfig", "DistilBertTokenizer"], "models.dit": [], + "models.donut": ["DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "DonutProcessor", "DonutSwinConfig"], "models.dpr": [ "DPR_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPRConfig", @@ -641,6 +642,7 @@ _import_structure["models.convnext"].append("ConvNextFeatureExtractor") _import_structure["models.deit"].append("DeiTFeatureExtractor") _import_structure["models.detr"].append("DetrFeatureExtractor") + _import_structure["models.donut"].append("DonutFeatureExtractor") _import_structure["models.dpt"].append("DPTFeatureExtractor") _import_structure["models.flava"].extend(["FlavaFeatureExtractor", "FlavaProcessor"]) _import_structure["models.glpn"].append("GLPNFeatureExtractor") @@ -1099,6 +1101,13 @@ "DistilBertPreTrainedModel", ] ) + _import_structure["models.donut"].extend( + [ + "DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST", + "DonutSwinModel", + "DonutSwinPreTrainedModel", + ] + ) _import_structure["models.dpr"].extend( [ "DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST", @@ -2984,6 +2993,7 @@ from .models.deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig from .models.detr import DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, DetrConfig from .models.distilbert import DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertTokenizer + from .models.donut import DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, DonutProcessor, DonutSwinConfig from .models.dpr import ( DPR_PRETRAINED_CONFIG_ARCHIVE_MAP, DPRConfig, @@ -3375,6 +3385,7 @@ from .models.convnext import ConvNextFeatureExtractor from .models.deit import DeiTFeatureExtractor from .models.detr import DetrFeatureExtractor + from .models.donut import DonutFeatureExtractor from .models.dpt import DPTFeatureExtractor from .models.flava import FlavaFeatureExtractor, FlavaProcessor from .models.glpn import GLPNFeatureExtractor @@ -3761,6 +3772,7 @@ DistilBertModel, DistilBertPreTrainedModel, ) + from .models.donut import DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, DonutSwinModel, DonutSwinPreTrainedModel from .models.dpr import ( DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, diff --git a/src/transformers/image_utils.py b/src/transformers/image_utils.py index dd7bb326993d34..e5a395341c0031 100644 --- a/src/transformers/image_utils.py +++ b/src/transformers/image_utils.py @@ -376,3 +376,25 @@ def flip_channel_order(self, image): image = self.to_numpy_array(image) return image[::-1, :, :] + + def rotate(self, image, angle, resample=PIL.Image.NEAREST, expand=0, center=None, translate=None, fillcolor=None): + """ + Returns a rotated copy of `image`. This method returns a copy of `image`, rotated the given number of degrees + counter clockwise around its centre. + + Args: + image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`): + The image to rotate. If `np.ndarray` or `torch.Tensor`, will be converted to `PIL.Image.Image` before + rotating. + + Returns: + image: A rotated `PIL.Image.Image`. + """ + self._ensure_format_supported(image) + + if not isinstance(image, PIL.Image.Image): + image = self.to_pil_image(image) + + return image.rotate( + angle, resample=resample, expand=expand, center=center, translate=translate, fillcolor=fillcolor + ) diff --git a/src/transformers/models/__init__.py b/src/transformers/models/__init__.py index 11887db91f8393..fdf315b2257d8b 100644 --- a/src/transformers/models/__init__.py +++ b/src/transformers/models/__init__.py @@ -52,6 +52,7 @@ dialogpt, distilbert, dit, + donut, dpr, dpt, electra, diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index c65a2762a00029..c9e6156a3843d3 100644 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -56,6 +56,7 @@ ("deit", "DeiTConfig"), ("detr", "DetrConfig"), ("distilbert", "DistilBertConfig"), + ("donut-swin", "DonutSwinConfig"), ("dpr", "DPRConfig"), ("dpt", "DPTConfig"), ("electra", "ElectraConfig"), @@ -181,6 +182,7 @@ ("deit", "DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("detr", "DETR_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("distilbert", "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("donut-swin", "DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("dpr", "DPR_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("dpt", "DPT_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("electra", "ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP"), @@ -304,6 +306,8 @@ ("dialogpt", "DialoGPT"), ("distilbert", "DistilBERT"), ("dit", "DiT"), + ("donut", "Donut"), + ("donut-swin", "DonutSwin"), ("dpr", "DPR"), ("dpt", "DPT"), ("electra", "ELECTRA"), @@ -420,6 +424,7 @@ ("data2vec-audio", "data2vec"), ("data2vec-text", "data2vec"), ("data2vec-vision", "data2vec"), + ("donut-swin", "donut"), ] ) diff --git a/src/transformers/models/auto/feature_extraction_auto.py b/src/transformers/models/auto/feature_extraction_auto.py index db581d03d8fb7e..5c5f86d040c8fe 100644 --- a/src/transformers/models/auto/feature_extraction_auto.py +++ b/src/transformers/models/auto/feature_extraction_auto.py @@ -46,6 +46,7 @@ ("deit", "DeiTFeatureExtractor"), ("detr", "DetrFeatureExtractor"), ("detr", "DetrFeatureExtractor"), + ("donut", "DonutFeatureExtractor"), ("dpt", "DPTFeatureExtractor"), ("flava", "FlavaFeatureExtractor"), ("glpn", "GLPNFeatureExtractor"), diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index bd4774c245b07b..0e026cb48d0c0d 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -56,6 +56,7 @@ ("deit", "DeiTModel"), ("detr", "DetrModel"), ("distilbert", "DistilBertModel"), + ("donut-swin", "DonutSwinModel"), ("dpr", "DPRQuestionEncoder"), ("dpt", "DPTModel"), ("electra", "ElectraModel"), diff --git a/src/transformers/models/auto/processing_auto.py b/src/transformers/models/auto/processing_auto.py index aed7b4b9761373..c6f4fd98316a44 100644 --- a/src/transformers/models/auto/processing_auto.py +++ b/src/transformers/models/auto/processing_auto.py @@ -38,6 +38,7 @@ PROCESSOR_MAPPING_NAMES = OrderedDict( [ ("clip", "CLIPProcessor"), + ("donut", "DonutProcessor"), ("flava", "FlavaProcessor"), ("groupvit", "CLIPProcessor"), ("layoutlmv2", "LayoutLMv2Processor"), diff --git a/src/transformers/models/donut/__init__.py b/src/transformers/models/donut/__init__.py new file mode 100644 index 00000000000000..a01f6b11a9a995 --- /dev/null +++ b/src/transformers/models/donut/__init__.py @@ -0,0 +1,76 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available + + +_import_structure = { + "configuration_donut_swin": ["DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "DonutSwinConfig"], + "processing_donut": ["DonutProcessor"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_donut_swin"] = [ + "DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST", + "DonutSwinModel", + "DonutSwinPreTrainedModel", + ] + +try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["feature_extraction_donut"] = ["DonutFeatureExtractor"] + + +if TYPE_CHECKING: + from .configuration_donut_swin import DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, DonutSwinConfig + from .processing_donut import DonutProcessor + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_donut_swin import ( + DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, + DonutSwinModel, + DonutSwinPreTrainedModel, + ) + + try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .feature_extraction_donut import DonutFeatureExtractor + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/src/transformers/models/donut/configuration_donut_swin.py b/src/transformers/models/donut/configuration_donut_swin.py new file mode 100644 index 00000000000000..d3316bdc79f685 --- /dev/null +++ b/src/transformers/models/donut/configuration_donut_swin.py @@ -0,0 +1,140 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Donut Swin Transformer model configuration""" + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + +DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json", + # See all Donut models at https://huggingface.co/models?filter=donut-swin +} + + +class DonutSwinConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`DonutSwinModel`]. It is used to instantiate a + Donut model according to the specified arguments, defining the model architecture. Instantiating a configuration + with the defaults will yield a similar configuration to that of the Donut + [naver-clova-ix/donut-base](https://huggingface.co/naver-clova-ix/donut-base) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + image_size (`int`, *optional*, defaults to 224): + The size (resolution) of each image. + patch_size (`int`, *optional*, defaults to 4): + The size (resolution) of each patch. + num_channels (`int`, *optional*, defaults to 3): + The number of input channels. + embed_dim (`int`, *optional*, defaults to 96): + Dimensionality of patch embedding. + depths (`list(int)`, *optional*, defaults to [2, 2, 6, 2]): + Depth of each layer in the Transformer encoder. + num_heads (`list(int)`, *optional*, defaults to [3, 6, 12, 24]): + Number of attention heads in each layer of the Transformer encoder. + window_size (`int`, *optional*, defaults to 7): + Size of windows. + mlp_ratio (`float`, *optional*, defaults to 4.0): + Ratio of MLP hidden dimensionality to embedding dimensionality. + qkv_bias (`bool`, *optional*, defaults to True): + Whether or not a learnable bias should be added to the queries, keys and values. + hidden_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout probability for all fully connected layers in the embeddings and encoder. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + drop_path_rate (`float`, *optional*, defaults to 0.1): + Stochastic depth rate. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`, + `"selu"` and `"gelu_new"` are supported. + use_absolute_embeddings (`bool`, *optional*, defaults to False): + Whether or not to add absolute position embeddings to the patch embeddings. + patch_norm (`bool`, *optional*, defaults to True): + Whether or not to add layer normalization after patch embedding. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the layer normalization layers. + + Example: + + ```python + >>> from transformers import DonutSwinConfig, DonutSwinModel + + >>> # Initializing a Donut naver-clova-ix/donut-base style configuration + >>> configuration = DonutSwinConfig() + + >>> # Randomly initializing a model from the naver-clova-ix/donut-base style configuration + >>> model = DonutSwinModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + model_type = "donut-swin" + + attribute_map = { + "num_attention_heads": "num_heads", + "num_hidden_layers": "num_layers", + } + + def __init__( + self, + image_size=224, + patch_size=4, + num_channels=3, + embed_dim=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4.0, + qkv_bias=True, + hidden_dropout_prob=0.0, + attention_probs_dropout_prob=0.0, + drop_path_rate=0.1, + hidden_act="gelu", + use_absolute_embeddings=False, + patch_norm=True, + initializer_range=0.02, + layer_norm_eps=1e-5, + **kwargs + ): + super().__init__(**kwargs) + + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.embed_dim = embed_dim + self.depths = depths + self.num_layers = len(depths) + self.num_heads = num_heads + self.window_size = window_size + self.mlp_ratio = mlp_ratio + self.qkv_bias = qkv_bias + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.drop_path_rate = drop_path_rate + self.hidden_act = hidden_act + self.use_absolute_embeddings = use_absolute_embeddings + self.path_norm = patch_norm + self.layer_norm_eps = layer_norm_eps + self.initializer_range = initializer_range + # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel + # this indicates the channel dimension after the last stage of the model + self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1)) diff --git a/src/transformers/models/donut/convert_donut_to_pytorch.py b/src/transformers/models/donut/convert_donut_to_pytorch.py new file mode 100644 index 00000000000000..507f10cb776cf0 --- /dev/null +++ b/src/transformers/models/donut/convert_donut_to_pytorch.py @@ -0,0 +1,234 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Convert Donut checkpoints using the original `donut-python` library. URL: https://github.com/clovaai/donut""" + +import argparse + +import torch +from datasets import load_dataset + +from donut import DonutModel +from transformers import ( + DonutFeatureExtractor, + DonutProcessor, + DonutSwinConfig, + DonutSwinModel, + MBartConfig, + MBartForCausalLM, + VisionEncoderDecoderModel, + XLMRobertaTokenizerFast, +) + + +def get_configs(model): + original_config = model.config + + encoder_config = DonutSwinConfig( + image_size=original_config.input_size, + patch_size=4, + depths=original_config.encoder_layer, + num_heads=[4, 8, 16, 32], + window_size=original_config.window_size, + embed_dim=128, + ) + decoder_config = MBartConfig( + is_decoder=True, + is_encoder_decoder=False, + add_cross_attention=True, + decoder_layers=original_config.decoder_layer, + max_position_embeddings=original_config.max_position_embeddings, + vocab_size=len( + model.decoder.tokenizer + ), # several special tokens are added to the vocab of XLMRobertaTokenizer, see repo on the hub (added_tokens.json) + scale_embedding=True, + add_final_layer_norm=True, + ) + + return encoder_config, decoder_config + + +def rename_key(name): + if "encoder.model" in name: + name = name.replace("encoder.model", "encoder") + if "decoder.model" in name: + name = name.replace("decoder.model", "decoder") + if "patch_embed.proj" in name: + name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection") + if "patch_embed.norm" in name: + name = name.replace("patch_embed.norm", "embeddings.norm") + if name.startswith("encoder"): + if "layers" in name: + name = "encoder." + name + if "attn.proj" in name: + name = name.replace("attn.proj", "attention.output.dense") + if "attn" in name and "mask" not in name: + name = name.replace("attn", "attention.self") + if "norm1" in name: + name = name.replace("norm1", "layernorm_before") + if "norm2" in name: + name = name.replace("norm2", "layernorm_after") + if "mlp.fc1" in name: + name = name.replace("mlp.fc1", "intermediate.dense") + if "mlp.fc2" in name: + name = name.replace("mlp.fc2", "output.dense") + + if name == "encoder.norm.weight": + name = "encoder.layernorm.weight" + if name == "encoder.norm.bias": + name = "encoder.layernorm.bias" + + return name + + +def convert_state_dict(orig_state_dict, model): + for key in orig_state_dict.copy().keys(): + val = orig_state_dict.pop(key) + + if "qkv" in key: + key_split = key.split(".") + layer_num = int(key_split[3]) + block_num = int(key_split[5]) + dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size + + if "weight" in key: + orig_state_dict[ + f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight" + ] = val[:dim, :] + orig_state_dict[ + f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight" + ] = val[dim : dim * 2, :] + orig_state_dict[ + f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight" + ] = val[-dim:, :] + else: + orig_state_dict[ + f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias" + ] = val[:dim] + orig_state_dict[ + f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias" + ] = val[dim : dim * 2] + orig_state_dict[ + f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias" + ] = val[-dim:] + elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]: + # HuggingFace implementation doesn't use attn_mask buffer + # and model doesn't use final LayerNorms for the encoder + pass + else: + orig_state_dict[rename_key(key)] = val + + return orig_state_dict + + +def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False): + # load original model + original_model = DonutModel.from_pretrained(model_name).eval() + + # load HuggingFace model + encoder_config, decoder_config = get_configs(original_model) + encoder = DonutSwinModel(encoder_config) + decoder = MBartForCausalLM(decoder_config) + model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder) + model.eval() + + state_dict = original_model.state_dict() + new_state_dict = convert_state_dict(state_dict, model) + model.load_state_dict(new_state_dict) + + # verify results on scanned document + dataset = load_dataset("hf-internal-testing/example-documents") + image = dataset["test"][0]["image"].convert("RGB") + + tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True) + feature_extractor = DonutFeatureExtractor( + do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1] + ) + processor = DonutProcessor(feature_extractor, tokenizer) + pixel_values = processor(image, return_tensors="pt").pixel_values + + if model_name == "naver-clova-ix/donut-base-finetuned-docvqa": + task_prompt = "{user_input}" + question = "When is the coffee break?" + task_prompt = task_prompt.replace("{user_input}", question) + elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip": + task_prompt = "" + elif model_name in [ + "naver-clova-ix/donut-base-finetuned-cord-v1", + "naver-clova-ix/donut-base-finetuned-cord-v1-2560", + ]: + task_prompt = "" + elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2": + task_prompt = "s_cord-v2>" + elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket": + task_prompt = "" + elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]: + # use a random prompt + task_prompt = "hello world" + else: + raise ValueError("Model name not supported") + prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[ + "input_ids" + ] + + original_patch_embed = original_model.encoder.model.patch_embed(pixel_values) + patch_embeddings, _ = model.encoder.embeddings(pixel_values) + assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3) + + # verify encoder hidden states + original_last_hidden_state = original_model.encoder(pixel_values) + last_hidden_state = model.encoder(pixel_values).last_hidden_state + assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2) + + # verify decoder hidden states + original_logits = original_model(pixel_values, prompt_tensors, None).logits + logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits + assert torch.allclose(original_logits, logits, atol=1e-3) + print("Looks ok!") + + if pytorch_dump_folder_path is not None: + print(f"Saving model and processor to {pytorch_dump_folder_path}") + model.save_pretrained(pytorch_dump_folder_path) + processor.save_pretrained(pytorch_dump_folder_path) + + if push_to_hub: + model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model") + processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--model_name", + default="naver-clova-ix/donut-base-finetuned-docvqa", + required=False, + type=str, + help="Name of the original model you'd like to convert.", + ) + parser.add_argument( + "--pytorch_dump_folder_path", + default=None, + required=False, + type=str, + help="Path to the output PyTorch model directory.", + ) + parser.add_argument( + "--push_to_hub", + action="store_true", + help="Whether or not to push the converted model and processor to the 🤗 hub.", + ) + + args = parser.parse_args() + convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) diff --git a/src/transformers/models/donut/feature_extraction_donut.py b/src/transformers/models/donut/feature_extraction_donut.py new file mode 100644 index 00000000000000..09bf3a6ad1c157 --- /dev/null +++ b/src/transformers/models/donut/feature_extraction_donut.py @@ -0,0 +1,208 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Feature extractor class for Donut.""" + +from typing import Optional, Tuple, Union + +import numpy as np +from PIL import Image, ImageOps + +from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin +from ...image_utils import ( + IMAGENET_STANDARD_MEAN, + IMAGENET_STANDARD_STD, + ImageFeatureExtractionMixin, + ImageInput, + is_torch_tensor, +) +from ...utils import TensorType, logging + + +logger = logging.get_logger(__name__) + + +class DonutFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin): + r""" + Constructs a Donut feature extractor. + + This feature extractor inherits from [`FeatureExtractionMixin`] which contains most of the main methods. Users + should refer to this superclass for more information regarding those methods. + + Args: + do_resize (`bool`, *optional*, defaults to `True`): + Whether to resize the shorter edge of the input to the minimum value of a certain `size`. + size (`Tuple(int)`, *optional*, defaults to [1920, 2560]): + Resize the shorter edge of the input to the minimum value of the given size. Should be a tuple of (width, + height). Only has an effect if `do_resize` is set to `True`. + resample (`int`, *optional*, defaults to `PIL.Image.BILINEAR`): + An optional resampling filter. This can be one of `PIL.Image.NEAREST`, `PIL.Image.BOX`, + `PIL.Image.BILINEAR`, `PIL.Image.HAMMING`, `PIL.Image.BICUBIC` or `PIL.Image.LANCZOS`. Only has an effect + if `do_resize` is set to `True`. + do_thumbnail (`bool`, *optional*, defaults to `True`): + Whether to thumbnail the input to the given `size`. + do_align_long_axis (`bool`, *optional*, defaults to `False`): + Whether to rotate the input if the height is greater than width. + do_pad (`bool`, *optional*, defaults to `True`): + Whether or not to pad the input to `size`. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether or not to normalize the input with mean and standard deviation. + image_mean (`List[int]`, defaults to `[0.5, 0.5, 0.5]`): + The sequence of means for each channel, to be used when normalizing images. + image_std (`List[int]`, defaults to `[0.5, 0.5, 0.5]`): + The sequence of standard deviations for each channel, to be used when normalizing images. + + """ + + model_input_names = ["pixel_values"] + + def __init__( + self, + do_resize=True, + size=[1920, 2560], + resample=Image.BILINEAR, + do_thumbnail=True, + do_align_long_axis=False, + do_pad=True, + do_normalize=True, + image_mean=None, + image_std=None, + **kwargs + ): + super().__init__(**kwargs) + self.do_resize = do_resize + self.size = size + self.resample = resample + self.do_thumbnail = do_thumbnail + self.do_align_long_axis = do_align_long_axis + self.do_pad = do_pad + self.do_normalize = do_normalize + self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN + self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD + + def rotate_image(self, image, size): + if not isinstance(image, Image.Image): + image = self.to_pil_image(image) + + if (size[1] > size[0] and image.width > image.height) or (size[1] < size[0] and image.width < image.height): + image = self.rotate(image, angle=-90, expand=True) + + return image + + def thumbnail(self, image, size): + if not isinstance(image, Image.Image): + image = self.to_pil_image(image) + + image.thumbnail((size[0], size[1])) + + return image + + def pad(self, image: Image.Image, size: Tuple[int, int], random_padding: bool = False) -> Image.Image: + delta_width = size[0] - image.width + delta_height = size[1] - image.height + + if random_padding: + pad_width = np.random.randint(low=0, high=delta_width + 1) + pad_height = np.random.randint(low=0, high=delta_height + 1) + else: + pad_width = delta_width // 2 + pad_height = delta_height // 2 + + padding = (pad_width, pad_height, delta_width - pad_width, delta_height - pad_height) + return ImageOps.expand(image, padding) + + def __call__( + self, + images: ImageInput, + return_tensors: Optional[Union[str, TensorType]] = None, + random_padding=False, + **kwargs + ) -> BatchFeature: + """ + Main method to prepare for the model one or several image(s). + + + + NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass + PIL images. + + + + Args: + images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): + The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch + tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a + number of channels, H and W are image height and width. + + random_padding (`bool`, *optional*, defaults to `False`): + Whether to randomly pad the input to `size`. + + return_tensors (`str` or [`~utils.TensorType`], *optional*, defaults to `'np'`): + If set, will return tensors of a particular framework. Acceptable values are: + + - `'tf'`: Return TensorFlow `tf.constant` objects. + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return NumPy `np.ndarray` objects. + - `'jax'`: Return JAX `jnp.ndarray` objects. + + Returns: + [`BatchFeature`]: A [`BatchFeature`] with the following fields: + + - **pixel_values** -- Pixel values to be fed to a model, of shape (batch_size, num_channels, height, + width). + """ + # Input type checking for clearer error + valid_images = False + + # Check that images has a valid type + if isinstance(images, (Image.Image, np.ndarray)) or is_torch_tensor(images): + valid_images = True + elif isinstance(images, (list, tuple)): + if len(images) == 0 or isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]): + valid_images = True + + if not valid_images: + raise ValueError( + "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), " + "`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)." + ) + + is_batched = bool( + isinstance(images, (list, tuple)) + and (isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0])) + ) + + if not is_batched: + images = [images] + + # transformations (rotating + resizing + thumbnailing + padding + normalization) + if self.do_align_long_axis: + images = [self.rotate_image(image, self.size) for image in images] + if self.do_resize and self.size is not None: + images = [ + self.resize(image=image, size=min(self.size), resample=self.resample, default_to_square=False) + for image in images + ] + if self.do_thumbnail and self.size is not None: + images = [self.thumbnail(image=image, size=self.size) for image in images] + if self.do_pad and self.size is not None: + images = [self.pad(image=image, size=self.size, random_padding=random_padding) for image in images] + if self.do_normalize: + images = [self.normalize(image=image, mean=self.image_mean, std=self.image_std) for image in images] + + # return as BatchFeature + data = {"pixel_values": images} + encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) + + return encoded_inputs diff --git a/src/transformers/models/donut/modeling_donut_swin.py b/src/transformers/models/donut/modeling_donut_swin.py new file mode 100644 index 00000000000000..78e5cc81c19885 --- /dev/null +++ b/src/transformers/models/donut/modeling_donut_swin.py @@ -0,0 +1,941 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch Donut Swin Transformer model. + +This implementation is identical to a regular Swin Transformer, without final layer norm on top of the final hidden +states.""" + +import collections.abc +import math +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn + +from ...activations import ACT2FN +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer +from ...utils import ( + ModelOutput, + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, +) +from .configuration_donut_swin import DonutSwinConfig + + +logger = logging.get_logger(__name__) + +# General docstring +_CONFIG_FOR_DOC = "DonutSwinConfig" +_FEAT_EXTRACTOR_FOR_DOC = "AutoFeatureExtractor" + +# Base docstring +_CHECKPOINT_FOR_DOC = "https://huggingface.co/naver-clova-ix/donut-base" +_EXPECTED_OUTPUT_SHAPE = [1, 49, 768] + +DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "naver-clova-ix/donut-base", + # See all Donut Swin models at https://huggingface.co/models?filter=donut +] + + +@dataclass +# Copied from transformers.models.swin.modeling_swin.SwinEncoderOutput with Swin->DonutSwin +class DonutSwinEncoderOutput(ModelOutput): + """ + DonutSwin encoder's outputs, with potential hidden states and attentions. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of + shape `(batch_size, hidden_size, height, width)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to + include the spatial dimensions. + """ + + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +# Copied from transformers.models.swin.modeling_swin.SwinModelOutput with Swin->DonutSwin +class DonutSwinModelOutput(ModelOutput): + """ + DonutSwin model's outputs that also contains a pooling of the last hidden states. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed): + Average pooling of the last layer hidden-state. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of + shape `(batch_size, hidden_size, height, width)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to + include the spatial dimensions. + """ + + last_hidden_state: torch.FloatTensor = None + pooler_output: Optional[torch.FloatTensor] = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + reshaped_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + + +# Copied from transformers.models.swin.modeling_swin.window_partition +def window_partition(input_feature, window_size): + """ + Partitions the given input into windows. + """ + batch_size, height, width, num_channels = input_feature.shape + input_feature = input_feature.view( + batch_size, height // window_size, window_size, width // window_size, window_size, num_channels + ) + windows = input_feature.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels) + return windows + + +# Copied from transformers.models.swin.modeling_swin.window_reverse +def window_reverse(windows, window_size, height, width): + """ + Merges windows to produce higher resolution features. + """ + batch_size = math.floor(windows.shape[0] / (height * width / window_size / window_size)) + windows = windows.view(batch_size, height // window_size, width // window_size, window_size, window_size, -1) + windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(batch_size, height, width, -1) + return windows + + +# Copied from transformers.models.swin.modeling_swin.SwinEmbeddings with Swin->DonutSwin +class DonutSwinEmbeddings(nn.Module): + """ + Construct the patch and position embeddings. Optionally, also the mask token. + """ + + def __init__(self, config, use_mask_token=False): + super().__init__() + + self.patch_embeddings = DonutSwinPatchEmbeddings(config) + num_patches = self.patch_embeddings.num_patches + self.patch_grid = self.patch_embeddings.grid_size + self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None + + if config.use_absolute_embeddings: + self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.embed_dim)) + else: + self.position_embeddings = None + + self.norm = nn.LayerNorm(config.embed_dim) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward( + self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor] = None + ) -> Tuple[torch.Tensor]: + embeddings, output_dimensions = self.patch_embeddings(pixel_values) + embeddings = self.norm(embeddings) + batch_size, seq_len, _ = embeddings.size() + + if bool_masked_pos is not None: + mask_tokens = self.mask_token.expand(batch_size, seq_len, -1) + # replace the masked visual tokens by mask_tokens + mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) + embeddings = embeddings * (1.0 - mask) + mask_tokens * mask + + if self.position_embeddings is not None: + embeddings = embeddings + self.position_embeddings + + embeddings = self.dropout(embeddings) + + return embeddings, output_dimensions + + +# Copied from transformers.models.swin.modeling_swin.SwinPatchEmbeddings +class DonutSwinPatchEmbeddings(nn.Module): + """ + This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial + `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a + Transformer. + """ + + def __init__(self, config): + super().__init__() + image_size, patch_size = config.image_size, config.patch_size + num_channels, hidden_size = config.num_channels, config.embed_dim + image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) + patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) + num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.num_patches = num_patches + self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1]) + + self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) + + def maybe_pad(self, pixel_values, height, width): + if width % self.patch_size[1] != 0: + pad_values = (0, self.patch_size[1] - width % self.patch_size[1]) + pixel_values = nn.functional.pad(pixel_values, pad_values) + if height % self.patch_size[0] != 0: + pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0]) + pixel_values = nn.functional.pad(pixel_values, pad_values) + return pixel_values + + def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor, Tuple[int]]: + _, num_channels, height, width = pixel_values.shape + if num_channels != self.num_channels: + raise ValueError( + "Make sure that the channel dimension of the pixel values match with the one set in the configuration." + ) + # pad the input to be divisible by self.patch_size, if needed + pixel_values = self.maybe_pad(pixel_values, height, width) + embeddings = self.projection(pixel_values) + _, _, height, width = embeddings.shape + output_dimensions = (height, width) + embeddings = embeddings.flatten(2).transpose(1, 2) + + return embeddings, output_dimensions + + +# Copied from transformers.models.swin.modeling_swin.SwinPatchMerging +class DonutSwinPatchMerging(nn.Module): + """ + Patch Merging Layer. + + Args: + input_resolution (`Tuple[int]`): + Resolution of input feature. + dim (`int`): + Number of input channels. + norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`): + Normalization layer class. + """ + + def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None: + super().__init__() + self.input_resolution = input_resolution + self.dim = dim + self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) + self.norm = norm_layer(4 * dim) + + def maybe_pad(self, input_feature, height, width): + should_pad = (height % 2 == 1) or (width % 2 == 1) + if should_pad: + pad_values = (0, 0, 0, width % 2, 0, height % 2) + input_feature = nn.functional.pad(input_feature, pad_values) + + return input_feature + + def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor: + height, width = input_dimensions + # `dim` is height * width + batch_size, dim, num_channels = input_feature.shape + + input_feature = input_feature.view(batch_size, height, width, num_channels) + # pad input to be disible by width and height, if needed + input_feature = self.maybe_pad(input_feature, height, width) + # [batch_size, height/2, width/2, num_channels] + input_feature_0 = input_feature[:, 0::2, 0::2, :] + # [batch_size, height/2, width/2, num_channels] + input_feature_1 = input_feature[:, 1::2, 0::2, :] + # [batch_size, height/2, width/2, num_channels] + input_feature_2 = input_feature[:, 0::2, 1::2, :] + # [batch_size, height/2, width/2, num_channels] + input_feature_3 = input_feature[:, 1::2, 1::2, :] + # batch_size height/2 width/2 4*num_channels + input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1) + input_feature = input_feature.view(batch_size, -1, 4 * num_channels) # batch_size height/2*width/2 4*C + + input_feature = self.norm(input_feature) + input_feature = self.reduction(input_feature) + + return input_feature + + +# Copied from transformers.models.swin.modeling_swin.drop_path +def drop_path(input, drop_prob=0.0, training=False, scale_by_keep=True): + """ + Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + + Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, + however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the + layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the + argument. + """ + if drop_prob == 0.0 or not training: + return input + keep_prob = 1 - drop_prob + shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) + random_tensor.floor_() # binarize + output = input.div(keep_prob) * random_tensor + return output + + +# Copied from transformers.models.swin.modeling_swin.SwinDropPath +class DonutSwinDropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" + + def __init__(self, drop_prob: Optional[float] = None) -> None: + super().__init__() + self.drop_prob = drop_prob + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return drop_path(x, self.drop_prob, self.training) + + def extra_repr(self) -> str: + return "p={}".format(self.drop_prob) + + +# Copied from transformers.models.swin.modeling_swin.SwinSelfAttention with Swin->DonutSwin +class DonutSwinSelfAttention(nn.Module): + def __init__(self, config, dim, num_heads): + super().__init__() + if dim % num_heads != 0: + raise ValueError( + f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})" + ) + + self.num_attention_heads = num_heads + self.attention_head_size = int(dim / num_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + window_size = config.window_size + self.window_size = ( + window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size) + ) + + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads) + ) + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) + coords_flatten = torch.flatten(coords, 1) + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] + relative_coords = relative_coords.permute(1, 2, 0).contiguous() + relative_coords[:, :, 0] += self.window_size[0] - 1 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) + self.register_buffer("relative_position_index", relative_position_index) + + self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) + self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) + self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + batch_size, dim, num_channels = hidden_states.shape + mixed_query_layer = self.query(hidden_states) + + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + query_layer = self.transpose_for_scores(mixed_query_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + + relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)] + relative_position_bias = relative_position_bias.view( + self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1 + ) + + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() + attention_scores = attention_scores + relative_position_bias.unsqueeze(0) + + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in DonutSwinModel forward() function) + mask_shape = attention_mask.shape[0] + attention_scores = attention_scores.view( + batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim + ) + attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0) + attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim) + + # Normalize the attention scores to probabilities. + attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + return outputs + + +# Copied from transformers.models.swin.modeling_swin.SwinSelfOutput +class DonutSwinSelfOutput(nn.Module): + def __init__(self, config, dim): + super().__init__() + self.dense = nn.Linear(dim, dim) + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + + return hidden_states + + +# Copied from transformers.models.swin.modeling_swin.SwinAttention with Swin->DonutSwin +class DonutSwinAttention(nn.Module): + def __init__(self, config, dim, num_heads): + super().__init__() + self.self = DonutSwinSelfAttention(config, dim, num_heads) + self.output = DonutSwinSelfOutput(config, dim) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +# Copied from transformers.models.swin.modeling_swin.SwinIntermediate +class DonutSwinIntermediate(nn.Module): + def __init__(self, config, dim): + super().__init__() + self.dense = nn.Linear(dim, int(config.mlp_ratio * dim)) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +# Copied from transformers.models.swin.modeling_swin.SwinOutput +class DonutSwinOutput(nn.Module): + def __init__(self, config, dim): + super().__init__() + self.dense = nn.Linear(int(config.mlp_ratio * dim), dim) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + return hidden_states + + +# Copied from transformers.models.swin.modeling_swin.SwinLayer with Swin->DonutSwin +class DonutSwinLayer(nn.Module): + def __init__(self, config, dim, input_resolution, num_heads, shift_size=0): + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.shift_size = shift_size + self.window_size = config.window_size + self.input_resolution = input_resolution + self.set_shift_and_window_size(input_resolution) + self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps) + self.attention = DonutSwinAttention(config, dim, num_heads) + self.drop_path = DonutSwinDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity() + self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps) + self.intermediate = DonutSwinIntermediate(config, dim) + self.output = DonutSwinOutput(config, dim) + + def set_shift_and_window_size(self, input_resolution): + if min(input_resolution) <= self.window_size: + # if window size is larger than input resolution, we don't partition windows + self.shift_size = 0 + self.window_size = min(input_resolution) + + def get_attn_mask(self, height, width): + if self.shift_size > 0: + # calculate attention mask for SW-MSA + img_mask = torch.zeros((1, height, width, 1)) + height_slices = ( + slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None), + ) + width_slices = ( + slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None), + ) + count = 0 + for height_slice in height_slices: + for width_slice in width_slices: + img_mask[:, height_slice, width_slice, :] = count + count += 1 + + mask_windows = window_partition(img_mask, self.window_size) + mask_windows = mask_windows.view(-1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + else: + attn_mask = None + return attn_mask + + def maybe_pad(self, hidden_states, height, width): + pad_right = (self.window_size - width % self.window_size) % self.window_size + pad_bottom = (self.window_size - height % self.window_size) % self.window_size + pad_values = (0, 0, 0, pad_right, 0, pad_bottom) + hidden_states = nn.functional.pad(hidden_states, pad_values) + return hidden_states, pad_values + + def forward( + self, + hidden_states: torch.Tensor, + input_dimensions: Tuple[int, int], + head_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor, torch.Tensor]: + self.set_shift_and_window_size(input_dimensions) + height, width = input_dimensions + batch_size, _, channels = hidden_states.size() + shortcut = hidden_states + + hidden_states = self.layernorm_before(hidden_states) + hidden_states = hidden_states.view(batch_size, height, width, channels) + # pad hidden_states to multiples of window size + hidden_states, pad_values = self.maybe_pad(hidden_states, height, width) + + _, height_pad, width_pad, _ = hidden_states.shape + # cyclic shift + if self.shift_size > 0: + shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) + else: + shifted_hidden_states = hidden_states + + # partition windows + hidden_states_windows = window_partition(shifted_hidden_states, self.window_size) + hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels) + attn_mask = self.get_attn_mask(height_pad, width_pad) + if attn_mask is not None: + attn_mask = attn_mask.to(hidden_states_windows.device) + + attention_outputs = self.attention( + hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions + ) + + attention_output = attention_outputs[0] + + attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels) + shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad) + + # reverse cyclic shift + if self.shift_size > 0: + attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) + else: + attention_windows = shifted_windows + + was_padded = pad_values[3] > 0 or pad_values[5] > 0 + if was_padded: + attention_windows = attention_windows[:, :height, :width, :].contiguous() + + attention_windows = attention_windows.view(batch_size, height * width, channels) + + hidden_states = shortcut + self.drop_path(attention_windows) + + layer_output = self.layernorm_after(hidden_states) + layer_output = self.intermediate(layer_output) + layer_output = hidden_states + self.output(layer_output) + + layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,) + return layer_outputs + + +# Copied from transformers.models.swin.modeling_swin.SwinStage with Swin->DonutSwin +class DonutSwinStage(nn.Module): + def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample): + super().__init__() + self.config = config + self.dim = dim + self.blocks = nn.ModuleList( + [ + DonutSwinLayer( + config=config, + dim=dim, + input_resolution=input_resolution, + num_heads=num_heads, + shift_size=0 if (i % 2 == 0) else config.window_size // 2, + ) + for i in range(depth) + ] + ) + + # patch merging layer + if downsample is not None: + self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm) + else: + self.downsample = None + + self.pointing = False + + def forward( + self, + hidden_states: torch.Tensor, + input_dimensions: Tuple[int, int], + head_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + height, width = input_dimensions + for i, layer_module in enumerate(self.blocks): + + layer_head_mask = head_mask[i] if head_mask is not None else None + + layer_outputs = layer_module(hidden_states, input_dimensions, layer_head_mask, output_attentions) + + hidden_states = layer_outputs[0] + + if self.downsample is not None: + height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2 + output_dimensions = (height, width, height_downsampled, width_downsampled) + hidden_states = self.downsample(layer_outputs[0], input_dimensions) + else: + output_dimensions = (height, width, height, width) + + stage_outputs = (hidden_states, output_dimensions) + + if output_attentions: + stage_outputs += layer_outputs[1:] + return stage_outputs + + +# Copied from transformers.models.swin.modeling_swin.SwinEncoder with Swin->DonutSwin +class DonutSwinEncoder(nn.Module): + def __init__(self, config, grid_size): + super().__init__() + self.num_layers = len(config.depths) + self.config = config + dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))] + self.layers = nn.ModuleList( + [ + DonutSwinStage( + config=config, + dim=int(config.embed_dim * 2**i_layer), + input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)), + depth=config.depths[i_layer], + num_heads=config.num_heads[i_layer], + drop_path=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])], + downsample=DonutSwinPatchMerging if (i_layer < self.num_layers - 1) else None, + ) + for i_layer in range(self.num_layers) + ] + ) + + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + input_dimensions: Tuple[int, int], + head_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = False, + output_hidden_states: Optional[bool] = False, + return_dict: Optional[bool] = True, + ) -> Union[Tuple, DonutSwinEncoderOutput]: + all_input_dimensions = () + all_hidden_states = () if output_hidden_states else None + all_reshaped_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + + if output_hidden_states: + batch_size, _, hidden_size = hidden_states.shape + # rearrange b (h w) c -> b c h w + reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) + reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) + all_hidden_states += (hidden_states,) + all_reshaped_hidden_states += (reshaped_hidden_state,) + + for i, layer_module in enumerate(self.layers): + layer_head_mask = head_mask[i] if head_mask is not None else None + + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), hidden_states, input_dimensions, layer_head_mask + ) + else: + layer_outputs = layer_module(hidden_states, input_dimensions, layer_head_mask, output_attentions) + + hidden_states = layer_outputs[0] + output_dimensions = layer_outputs[1] + + input_dimensions = (output_dimensions[-2], output_dimensions[-1]) + all_input_dimensions += (input_dimensions,) + + if output_hidden_states: + batch_size, _, hidden_size = hidden_states.shape + # rearrange b (h w) c -> b c h w + reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) + reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) + all_hidden_states += (hidden_states,) + all_reshaped_hidden_states += (reshaped_hidden_state,) + + if output_attentions: + all_self_attentions += layer_outputs[2:] + + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) + + return DonutSwinEncoderOutput( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + reshaped_hidden_states=all_reshaped_hidden_states, + ) + + +# Copied from transformers.models.swin.modeling_swin.SwinPreTrainedModel with Swin->DonutSwin +class DonutSwinPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = DonutSwinConfig + base_model_prefix = "swin" + main_input_name = "pixel_values" + supports_gradient_checkpointing = True + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, (nn.Linear, nn.Conv2d)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, DonutSwinEncoder): + module.gradient_checkpointing = value + + +SWIN_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use + it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`DonutSwinConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +SWIN_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`AutoFeatureExtractor`]. See + [`AutoFeatureExtractor.__call__`] for details. + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare Donut Swin Model transformer outputting raw hidden-states without any specific head on top.", + SWIN_START_DOCSTRING, +) +class DonutSwinModel(DonutSwinPreTrainedModel): + def __init__(self, config, add_pooling_layer=True, use_mask_token=False): + super().__init__(config) + self.config = config + self.num_layers = len(config.depths) + self.num_features = int(config.embed_dim * 2 ** (self.num_layers - 1)) + + self.embeddings = DonutSwinEmbeddings(config, use_mask_token=use_mask_token) + self.encoder = DonutSwinEncoder(config, self.embeddings.patch_grid) + + self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.patch_embeddings + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(SWIN_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + processor_class=_FEAT_EXTRACTOR_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=DonutSwinModelOutput, + config_class=_CONFIG_FOR_DOC, + modality="vision", + expected_output=_EXPECTED_OUTPUT_SHAPE, + ) + def forward( + self, + pixel_values: Optional[torch.FloatTensor] = None, + bool_masked_pos: Optional[torch.BoolTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, DonutSwinModelOutput]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, len(self.config.depths)) + + embedding_output, input_dimensions = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos) + + encoder_outputs = self.encoder( + embedding_output, + input_dimensions, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = encoder_outputs[0] + + pooled_output = None + if self.pooler is not None: + pooled_output = self.pooler(sequence_output.transpose(1, 2)) + pooled_output = torch.flatten(pooled_output, 1) + + if not return_dict: + output = (sequence_output, pooled_output) + encoder_outputs[1:] + + return output + + return DonutSwinModelOutput( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + reshaped_hidden_states=encoder_outputs.reshaped_hidden_states, + ) diff --git a/src/transformers/models/donut/processing_donut.py b/src/transformers/models/donut/processing_donut.py new file mode 100644 index 00000000000000..1b00d894bd0878 --- /dev/null +++ b/src/transformers/models/donut/processing_donut.py @@ -0,0 +1,156 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Processor class for Donut. +""" +import re +import warnings +from contextlib import contextmanager + +from ...processing_utils import ProcessorMixin + + +class DonutProcessor(ProcessorMixin): + r""" + Constructs a Donut processor which wraps a Donut feature extractor and an XLMRoBERTa tokenizer into a single + processor. + + [`DonutProcessor`] offers all the functionalities of [`DonutFeatureExtractor`] and + [`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`]. See the [`~DonutProcessor.__call__`] and + [`~DonutProcessor.decode`] for more information. + + Args: + feature_extractor ([`DonutFeatureExtractor`]): + An instance of [`DonutFeatureExtractor`]. The feature extractor is a required input. + tokenizer ([`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`]): + An instance of [`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`]. The tokenizer is a required input. + """ + feature_extractor_class = "AutoFeatureExtractor" + tokenizer_class = "AutoTokenizer" + + def __init__(self, feature_extractor, tokenizer): + super().__init__(feature_extractor, tokenizer) + self.current_processor = self.feature_extractor + self._in_target_context_manager = False + + def __call__(self, *args, **kwargs): + """ + When used in normal mode, this method forwards all its arguments to AutoFeatureExtractor's + [`~AutoFeatureExtractor.__call__`] and returns its output. If used in the context + [`~DonutProcessor.as_target_processor`] this method forwards all its arguments to DonutTokenizer's + [`~DonutTokenizer.__call__`]. Please refer to the doctsring of the above two methods for more information. + """ + # For backward compatibility + if self._in_target_context_manager: + return self.current_processor(*args, **kwargs) + + images = kwargs.pop("images", None) + text = kwargs.pop("text", None) + if len(args) > 0: + images = args[0] + args = args[1:] + + if images is None and text is None: + raise ValueError("You need to specify either an `images` or `text` input to process.") + + if images is not None: + inputs = self.feature_extractor(images, *args, **kwargs) + if text is not None: + encodings = self.tokenizer(text, **kwargs) + + if text is None: + return inputs + elif images is None: + return encodings + else: + inputs["labels"] = encodings["input_ids"] + return inputs + + def batch_decode(self, *args, **kwargs): + """ + This method forwards all its arguments to DonutTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer + to the docstring of this method for more information. + """ + return self.tokenizer.batch_decode(*args, **kwargs) + + def decode(self, *args, **kwargs): + """ + This method forwards all its arguments to DonutTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the + docstring of this method for more information. + """ + return self.tokenizer.decode(*args, **kwargs) + + @contextmanager + def as_target_processor(self): + """ + Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning TrOCR. + """ + warnings.warn( + "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " + "labels by using the argument `text` of the regular `__call__` method (either in the same call as " + "your images inputs, or in a separate call." + ) + self._in_target_context_manager = True + self.current_processor = self.tokenizer + yield + self.current_processor = self.feature_extractor + self._in_target_context_manager = False + + def token2json(self, tokens, is_inner_value=False): + """ + Convert a (generated) token sequence into an ordered JSON format. + """ + output = dict() + + while tokens: + start_token = re.search(r"", tokens, re.IGNORECASE) + if start_token is None: + break + key = start_token.group(1) + end_token = re.search(rf"", tokens, re.IGNORECASE) + start_token = start_token.group() + if end_token is None: + tokens = tokens.replace(start_token, "") + else: + end_token = end_token.group() + start_token_escaped = re.escape(start_token) + end_token_escaped = re.escape(end_token) + content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE) + if content is not None: + content = content.group(1).strip() + if r""): + leaf = leaf.strip() + if leaf in self.tokenizer.get_added_vocab() and leaf[0] == "<" and leaf[-2:] == "/>": + leaf = leaf[1:-2] # for categorical special tokens + output[key].append(leaf) + if len(output[key]) == 1: + output[key] = output[key][0] + + tokens = tokens[tokens.find(end_token) + len(end_token) :].strip() + if tokens[:6] == r"": # non-leaf nodes + return [output] + self.token2json(tokens[6:], is_inner_value=True) + + if len(output): + return [output] if is_inner_value else output + else: + return [] if is_inner_value else {"text_sequence": tokens} diff --git a/src/transformers/models/vision_encoder_decoder/convert_trocr_unilm_to_pytorch.py b/src/transformers/models/trocr/convert_trocr_unilm_to_pytorch.py similarity index 100% rename from src/transformers/models/vision_encoder_decoder/convert_trocr_unilm_to_pytorch.py rename to src/transformers/models/trocr/convert_trocr_unilm_to_pytorch.py diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index d636be655af284..96a93ecae942a7 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -1682,6 +1682,23 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class DonutSwinModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class DonutSwinPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/src/transformers/utils/dummy_vision_objects.py b/src/transformers/utils/dummy_vision_objects.py index 30228e022222bf..fa30432070a37b 100644 --- a/src/transformers/utils/dummy_vision_objects.py +++ b/src/transformers/utils/dummy_vision_objects.py @@ -52,6 +52,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) +class DonutFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + class DPTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] diff --git a/src/transformers/utils/fx.py b/src/transformers/utils/fx.py index 990f278b0d5066..3c3babd4037780 100644 --- a/src/transformers/utils/fx.py +++ b/src/transformers/utils/fx.py @@ -105,6 +105,7 @@ def _generate_supported_model_class_names( "deberta", "deberta-v2", "distilbert", + "donut-swin", "electra", "gpt2", "gpt_neo", diff --git a/tests/models/donut/__init__.py b/tests/models/donut/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/tests/models/donut/test_feature_extraction_donut.py b/tests/models/donut/test_feature_extraction_donut.py new file mode 100644 index 00000000000000..38ccbf2075a9b1 --- /dev/null +++ b/tests/models/donut/test_feature_extraction_donut.py @@ -0,0 +1,203 @@ +# coding=utf-8 +# Copyright 2022 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import unittest + +import numpy as np + +from transformers.testing_utils import require_torch, require_vision +from transformers.utils import is_torch_available, is_vision_available + +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin, prepare_image_inputs + + +if is_torch_available(): + import torch + +if is_vision_available(): + from PIL import Image + + from transformers import DonutFeatureExtractor + + +class DonutFeatureExtractionTester(unittest.TestCase): + def __init__( + self, + parent, + batch_size=7, + num_channels=3, + image_size=18, + min_resolution=30, + max_resolution=400, + do_resize=True, + size=[20, 18], + do_thumbnail=True, + do_align_axis=False, + do_pad=True, + do_normalize=True, + image_mean=[0.5, 0.5, 0.5], + image_std=[0.5, 0.5, 0.5], + ): + self.parent = parent + self.batch_size = batch_size + self.num_channels = num_channels + self.image_size = image_size + self.min_resolution = min_resolution + self.max_resolution = max_resolution + self.do_resize = do_resize + self.size = size + self.do_thumbnail = do_thumbnail + self.do_align_axis = do_align_axis + self.do_pad = do_pad + self.do_normalize = do_normalize + self.image_mean = image_mean + self.image_std = image_std + + def prepare_feat_extract_dict(self): + return { + "do_resize": self.do_resize, + "size": self.size, + "do_thumbnail": self.do_thumbnail, + "do_align_long_axis": self.do_align_axis, + "do_pad": self.do_pad, + "do_normalize": self.do_normalize, + "image_mean": self.image_mean, + "image_std": self.image_std, + } + + +@require_torch +@require_vision +class DonutFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): + + feature_extraction_class = DonutFeatureExtractor if is_vision_available() else None + + def setUp(self): + self.feature_extract_tester = DonutFeatureExtractionTester(self) + + @property + def feat_extract_dict(self): + return self.feature_extract_tester.prepare_feat_extract_dict() + + def test_feat_extract_properties(self): + feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + self.assertTrue(hasattr(feature_extractor, "do_resize")) + self.assertTrue(hasattr(feature_extractor, "size")) + self.assertTrue(hasattr(feature_extractor, "do_thumbnail")) + self.assertTrue(hasattr(feature_extractor, "do_align_long_axis")) + self.assertTrue(hasattr(feature_extractor, "do_pad")) + self.assertTrue(hasattr(feature_extractor, "do_normalize")) + self.assertTrue(hasattr(feature_extractor, "image_mean")) + self.assertTrue(hasattr(feature_extractor, "image_std")) + + def test_batch_feature(self): + pass + + def test_call_pil(self): + # Initialize feature_extractor + feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # create random PIL images + image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + for image in image_inputs: + self.assertIsInstance(image, Image.Image) + + # Test not batched input + encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + self.assertEqual( + encoded_images.shape, + ( + 1, + self.feature_extract_tester.num_channels, + self.feature_extract_tester.size[1], + self.feature_extract_tester.size[0], + ), + ) + + # Test batched + encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + self.assertEqual( + encoded_images.shape, + ( + self.feature_extract_tester.batch_size, + self.feature_extract_tester.num_channels, + self.feature_extract_tester.size[1], + self.feature_extract_tester.size[0], + ), + ) + + def test_call_numpy(self): + # Initialize feature_extractor + feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # create random numpy tensors + image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + for image in image_inputs: + self.assertIsInstance(image, np.ndarray) + + # Test not batched input + encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + self.assertEqual( + encoded_images.shape, + ( + 1, + self.feature_extract_tester.num_channels, + self.feature_extract_tester.size[1], + self.feature_extract_tester.size[0], + ), + ) + + # Test batched + encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + self.assertEqual( + encoded_images.shape, + ( + self.feature_extract_tester.batch_size, + self.feature_extract_tester.num_channels, + self.feature_extract_tester.size[1], + self.feature_extract_tester.size[0], + ), + ) + + def test_call_pytorch(self): + # Initialize feature_extractor + feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # create random PyTorch tensors + image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + for image in image_inputs: + self.assertIsInstance(image, torch.Tensor) + + # Test not batched input + encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + self.assertEqual( + encoded_images.shape, + ( + 1, + self.feature_extract_tester.num_channels, + self.feature_extract_tester.size[1], + self.feature_extract_tester.size[0], + ), + ) + + # Test batched + encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + self.assertEqual( + encoded_images.shape, + ( + self.feature_extract_tester.batch_size, + self.feature_extract_tester.num_channels, + self.feature_extract_tester.size[1], + self.feature_extract_tester.size[0], + ), + ) diff --git a/tests/models/donut/test_modeling_donut_swin.py b/tests/models/donut/test_modeling_donut_swin.py new file mode 100644 index 00000000000000..f909d961880a97 --- /dev/null +++ b/tests/models/donut/test_modeling_donut_swin.py @@ -0,0 +1,464 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Testing suite for the PyTorch Donut Swin model. """ + +import collections +import inspect +import os +import pickle +import tempfile +import unittest + +from transformers import DonutSwinConfig +from transformers.testing_utils import require_torch, slow, torch_device +from transformers.utils import is_torch_available, is_torch_fx_available + +from ...test_configuration_common import ConfigTester +from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor + + +if is_torch_available(): + import torch + from torch import nn + + from transformers import DonutSwinModel + from transformers.models.donut.modeling_donut_swin import DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST + +if is_torch_fx_available(): + from transformers.utils.fx import symbolic_trace + + +class DonutSwinModelTester: + def __init__( + self, + parent, + batch_size=13, + image_size=32, + patch_size=2, + num_channels=3, + embed_dim=16, + depths=[1, 2, 1], + num_heads=[2, 2, 4], + window_size=2, + mlp_ratio=2.0, + qkv_bias=True, + hidden_dropout_prob=0.0, + attention_probs_dropout_prob=0.0, + drop_path_rate=0.1, + hidden_act="gelu", + use_absolute_embeddings=False, + patch_norm=True, + initializer_range=0.02, + layer_norm_eps=1e-5, + is_training=True, + scope=None, + use_labels=True, + type_sequence_label_size=10, + encoder_stride=8, + ): + self.parent = parent + self.batch_size = batch_size + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.embed_dim = embed_dim + self.depths = depths + self.num_heads = num_heads + self.window_size = window_size + self.mlp_ratio = mlp_ratio + self.qkv_bias = qkv_bias + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.drop_path_rate = drop_path_rate + self.hidden_act = hidden_act + self.use_absolute_embeddings = use_absolute_embeddings + self.patch_norm = patch_norm + self.layer_norm_eps = layer_norm_eps + self.initializer_range = initializer_range + self.is_training = is_training + self.scope = scope + self.use_labels = use_labels + self.type_sequence_label_size = type_sequence_label_size + self.encoder_stride = encoder_stride + + def prepare_config_and_inputs(self): + pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) + + labels = None + if self.use_labels: + labels = ids_tensor([self.batch_size], self.type_sequence_label_size) + + config = self.get_config() + + return config, pixel_values, labels + + def get_config(self): + return DonutSwinConfig( + image_size=self.image_size, + patch_size=self.patch_size, + num_channels=self.num_channels, + embed_dim=self.embed_dim, + depths=self.depths, + num_heads=self.num_heads, + window_size=self.window_size, + mlp_ratio=self.mlp_ratio, + qkv_bias=self.qkv_bias, + hidden_dropout_prob=self.hidden_dropout_prob, + attention_probs_dropout_prob=self.attention_probs_dropout_prob, + drop_path_rate=self.drop_path_rate, + hidden_act=self.hidden_act, + use_absolute_embeddings=self.use_absolute_embeddings, + path_norm=self.patch_norm, + layer_norm_eps=self.layer_norm_eps, + initializer_range=self.initializer_range, + encoder_stride=self.encoder_stride, + ) + + def create_and_check_model(self, config, pixel_values, labels): + model = DonutSwinModel(config=config) + model.to(torch_device) + model.eval() + result = model(pixel_values) + + expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) + expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1)) + + self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim)) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + ( + config, + pixel_values, + labels, + ) = config_and_inputs + inputs_dict = {"pixel_values": pixel_values} + return config, inputs_dict + + +@require_torch +class DonutSwinModelTest(ModelTesterMixin, unittest.TestCase): + + all_model_classes = (DonutSwinModel,) if is_torch_available() else () + fx_compatible = True + + test_pruning = False + test_resize_embeddings = False + test_head_masking = False + + def setUp(self): + self.model_tester = DonutSwinModelTester(self) + self.config_tester = ConfigTester(self, config_class=DonutSwinConfig, embed_dim=37) + + def test_config(self): + self.create_and_test_config_common_properties() + self.config_tester.create_and_test_config_to_json_string() + self.config_tester.create_and_test_config_to_json_file() + self.config_tester.create_and_test_config_from_and_save_pretrained() + self.config_tester.create_and_test_config_with_num_labels() + self.config_tester.check_config_can_be_init_without_params() + self.config_tester.check_config_arguments_init() + + def create_and_test_config_common_properties(self): + return + + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + def test_inputs_embeds(self): + # DonutSwin does not use inputs_embeds + pass + + def test_model_common_attributes(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) + x = model.get_output_embeddings() + self.assertTrue(x is None or isinstance(x, nn.Linear)) + + def test_forward_signature(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + signature = inspect.signature(model.forward) + # signature.parameters is an OrderedDict => so arg_names order is deterministic + arg_names = [*signature.parameters.keys()] + + expected_arg_names = ["pixel_values"] + self.assertListEqual(arg_names[:1], expected_arg_names) + + def test_attention_outputs(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + config.return_dict = True + + for model_class in self.all_model_classes: + inputs_dict["output_attentions"] = True + inputs_dict["output_hidden_states"] = False + config.return_dict = True + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + attentions = outputs.attentions + expected_num_attentions = len(self.model_tester.depths) + self.assertEqual(len(attentions), expected_num_attentions) + + # check that output_attentions also work using config + del inputs_dict["output_attentions"] + config.output_attentions = True + window_size_squared = config.window_size**2 + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + attentions = outputs.attentions + self.assertEqual(len(attentions), expected_num_attentions) + + self.assertListEqual( + list(attentions[0].shape[-3:]), + [self.model_tester.num_heads[0], window_size_squared, window_size_squared], + ) + out_len = len(outputs) + + # Check attention is always last and order is fine + inputs_dict["output_attentions"] = True + inputs_dict["output_hidden_states"] = True + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + if hasattr(self.model_tester, "num_hidden_states_types"): + added_hidden_states = self.model_tester.num_hidden_states_types + else: + # also another +1 for reshaped_hidden_states + added_hidden_states = 2 + self.assertEqual(out_len + added_hidden_states, len(outputs)) + + self_attentions = outputs.attentions + + self.assertEqual(len(self_attentions), expected_num_attentions) + + self.assertListEqual( + list(self_attentions[0].shape[-3:]), + [self.model_tester.num_heads[0], window_size_squared, window_size_squared], + ) + + def check_hidden_states_output(self, inputs_dict, config, model_class, image_size): + model = model_class(config) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + hidden_states = outputs.hidden_states + + expected_num_layers = getattr( + self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1 + ) + self.assertEqual(len(hidden_states), expected_num_layers) + + # DonutSwin has a different seq_length + patch_size = ( + config.patch_size + if isinstance(config.patch_size, collections.abc.Iterable) + else (config.patch_size, config.patch_size) + ) + + num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) + + self.assertListEqual( + list(hidden_states[0].shape[-2:]), + [num_patches, self.model_tester.embed_dim], + ) + + reshaped_hidden_states = outputs.reshaped_hidden_states + self.assertEqual(len(reshaped_hidden_states), expected_num_layers) + + batch_size, num_channels, height, width = reshaped_hidden_states[0].shape + reshaped_hidden_states = ( + reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1) + ) + self.assertListEqual( + list(reshaped_hidden_states.shape[-2:]), + [num_patches, self.model_tester.embed_dim], + ) + + def test_hidden_states_output(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + image_size = ( + self.model_tester.image_size + if isinstance(self.model_tester.image_size, collections.abc.Iterable) + else (self.model_tester.image_size, self.model_tester.image_size) + ) + + for model_class in self.all_model_classes: + inputs_dict["output_hidden_states"] = True + self.check_hidden_states_output(inputs_dict, config, model_class, image_size) + + # check that output_hidden_states also work using config + del inputs_dict["output_hidden_states"] + config.output_hidden_states = True + + self.check_hidden_states_output(inputs_dict, config, model_class, image_size) + + def test_hidden_states_output_with_padding(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + config.patch_size = 3 + + image_size = ( + self.model_tester.image_size + if isinstance(self.model_tester.image_size, collections.abc.Iterable) + else (self.model_tester.image_size, self.model_tester.image_size) + ) + patch_size = ( + config.patch_size + if isinstance(config.patch_size, collections.abc.Iterable) + else (config.patch_size, config.patch_size) + ) + + padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) + padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) + + for model_class in self.all_model_classes: + inputs_dict["output_hidden_states"] = True + self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) + + # check that output_hidden_states also work using config + del inputs_dict["output_hidden_states"] + config.output_hidden_states = True + self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) + + @slow + def test_model_from_pretrained(self): + for model_name in DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: + model = DonutSwinModel.from_pretrained(model_name) + self.assertIsNotNone(model) + + def test_initialization(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + configs_no_init = _config_zero_init(config) + for model_class in self.all_model_classes: + model = model_class(config=configs_no_init) + for name, param in model.named_parameters(): + if "embeddings" not in name and param.requires_grad: + self.assertIn( + ((param.data.mean() * 1e9).round() / 1e9).item(), + [0.0, 1.0], + msg=f"Parameter {name} of model {model_class} seems not properly initialized", + ) + + def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False): + if not is_torch_fx_available() or not self.fx_compatible: + return + + configs_no_init = _config_zero_init(config) # To be sure we have no Nan + configs_no_init.return_dict = False + + for model_class in self.all_model_classes: + model = model_class(config=configs_no_init) + model.to(torch_device) + model.eval() + inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss) + + try: + if model.config.is_encoder_decoder: + model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward + labels = inputs.get("labels", None) + input_names = ["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask"] + if labels is not None: + input_names.append("labels") + + filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} + input_names = list(filtered_inputs.keys()) + + model_output = model(**filtered_inputs) + + traced_model = symbolic_trace(model, input_names) + traced_output = traced_model(**filtered_inputs) + else: + input_names = ["input_ids", "attention_mask", "token_type_ids", "pixel_values"] + + labels = inputs.get("labels", None) + start_positions = inputs.get("start_positions", None) + end_positions = inputs.get("end_positions", None) + if labels is not None: + input_names.append("labels") + if start_positions is not None: + input_names.append("start_positions") + if end_positions is not None: + input_names.append("end_positions") + + filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} + input_names = list(filtered_inputs.keys()) + + model_output = model(**filtered_inputs) + + traced_model = symbolic_trace(model, input_names) + traced_output = traced_model(**filtered_inputs) + + except RuntimeError as e: + self.fail(f"Couldn't trace module: {e}") + + def flatten_output(output): + flatten = [] + for x in output: + if isinstance(x, (tuple, list)): + flatten += flatten_output(x) + elif not isinstance(x, torch.Tensor): + continue + else: + flatten.append(x) + return flatten + + model_output = flatten_output(model_output) + traced_output = flatten_output(traced_output) + num_outputs = len(model_output) + + for i in range(num_outputs): + self.assertTrue( + torch.allclose(model_output[i], traced_output[i]), + f"traced {i}th output doesn't match model {i}th output for {model_class}", + ) + + # Test that the model can be serialized and restored properly + with tempfile.TemporaryDirectory() as tmp_dir_name: + pkl_file_name = os.path.join(tmp_dir_name, "model.pkl") + try: + with open(pkl_file_name, "wb") as f: + pickle.dump(traced_model, f) + with open(pkl_file_name, "rb") as f: + loaded = pickle.load(f) + except Exception as e: + self.fail(f"Couldn't serialize / deserialize the traced model: {e}") + + loaded_output = loaded(**filtered_inputs) + loaded_output = flatten_output(loaded_output) + + for i in range(num_outputs): + self.assertTrue( + torch.allclose(model_output[i], loaded_output[i]), + f"serialized model {i}th output doesn't match model {i}th output for {model_class}", + ) diff --git a/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py b/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py index 320cdd63306262..7570888097c533 100644 --- a/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py +++ b/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py @@ -13,14 +13,22 @@ # See the License for the specific language governing permissions and # limitations under the License. - +import re import tempfile import unittest from datasets import load_dataset from packaging import version -from transformers.testing_utils import require_torch, require_vision, slow, to_2tuple, torch_device +from transformers import DonutProcessor, TrOCRProcessor +from transformers.testing_utils import ( + require_sentencepiece, + require_torch, + require_vision, + slow, + to_2tuple, + torch_device, +) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_modeling_common import floats_tensor, ids_tensor, random_attention_mask @@ -54,7 +62,7 @@ import PIL from PIL import Image - from transformers import TrOCRProcessor, ViTFeatureExtractor + from transformers import ViTFeatureExtractor @require_torch @@ -654,8 +662,8 @@ def default_processor(self): def test_inference_handwritten(self): model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten").to(torch_device) - ds = load_dataset("hf-internal-testing/fixtures_ocr", split="test") - image = Image.open(ds[0]["file"]).convert("RGB") + dataset = load_dataset("hf-internal-testing/fixtures_ocr", split="test") + image = Image.open(dataset[0]["file"]).convert("RGB") processor = self.default_processor pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(torch_device) @@ -679,8 +687,8 @@ def test_inference_handwritten(self): def test_inference_printed(self): model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-printed").to(torch_device) - ds = load_dataset("hf-internal-testing/fixtures_ocr", split="test") - image = Image.open(ds[1]["file"]).convert("RGB") + dataset = load_dataset("hf-internal-testing/fixtures_ocr", split="test") + image = Image.open(dataset[1]["file"]).convert("RGB") processor = self.default_processor pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(torch_device) @@ -774,3 +782,197 @@ def generate_step(pixel_values): # should produce # ["a cat laying on top of a couch next to another cat"] self.assertEqual(preds, ["a cat laying on top of a couch next to another cat"]) + + +@require_vision +@require_torch +@require_sentencepiece +class DonutModelIntegrationTest(unittest.TestCase): + @slow + def test_inference_docvqa(self): + processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa") + model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa").to( + torch_device + ) + + dataset = load_dataset("hf-internal-testing/example-documents", split="test") + image = dataset[0]["image"] + + pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(torch_device) + decoder_input_ids = processor.tokenizer( + "", add_special_tokens=False, return_tensors="pt" + ).input_ids.to(torch_device) + + # step 1: single forward pass + with torch.no_grad(): + outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids) + logits = outputs.logits + + # verify the logits + expected_shape = torch.Size([1, 1, 57532]) + self.assertEqual(outputs.logits.shape, expected_shape) + + expected_slice = torch.tensor([24.2731, -6.4522, 32.4130]).to(torch_device) + self.assertTrue(torch.allclose(logits[0, 0, :3], expected_slice, atol=1e-4)) + + # step 2: generation + task_prompt = "{user_input}" + question = "When is the coffee break?" + prompt = task_prompt.replace("{user_input}", question) + decoder_input_ids = processor.tokenizer(prompt, add_special_tokens=False, return_tensors="pt").input_ids + decoder_input_ids = decoder_input_ids.to(torch_device) + + outputs = model.generate( + pixel_values, + decoder_input_ids=decoder_input_ids, + max_length=model.decoder.config.max_position_embeddings, + early_stopping=True, + pad_token_id=processor.tokenizer.pad_token_id, + eos_token_id=processor.tokenizer.eos_token_id, + use_cache=True, + num_beams=1, + bad_words_ids=[[processor.tokenizer.unk_token_id]], + output_scores=True, + return_dict_in_generate=True, + ) + sequence = processor.batch_decode(outputs.sequences)[0] + sequence = sequence.replace(processor.tokenizer.eos_token, "").replace(processor.tokenizer.pad_token, "") + sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token + + # verify generated sequence + self.assertEqual( + sequence, " When is the coffee break? 11-14 to 11:39 a.m." + ) + + # verify scores + self.assertEqual(len(outputs.scores), 11) + self.assertTrue( + torch.allclose( + outputs.scores[0][0, :3], torch.tensor([5.3153, -3.5276, 13.4781], device=torch_device), atol=1e-4 + ) + ) + + @slow + def test_inference_cordv2(self): + processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-cord-v2") + model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-finetuned-cord-v2").to( + torch_device + ) + + dataset = load_dataset("hf-internal-testing/example-documents", split="test") + image = dataset[2]["image"] + + pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(torch_device) + decoder_input_ids = processor.tokenizer( + "", add_special_tokens=False, return_tensors="pt" + ).input_ids.to(torch_device) + + # step 1: single forward pass + with torch.no_grad(): + outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids) + logits = outputs.logits + + # verify the logits + expected_shape = torch.Size((1, 1, model.decoder.config.vocab_size)) + self.assertEqual(outputs.logits.shape, expected_shape) + + expected_slice = torch.tensor([-27.4344, -3.2686, -19.3524], device=torch_device) + self.assertTrue(torch.allclose(logits[0, 0, :3], expected_slice, atol=1e-4)) + + # step 2: generation + task_prompt = "" + decoder_input_ids = processor.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt").input_ids + decoder_input_ids = decoder_input_ids.to(torch_device) + + outputs = model.generate( + pixel_values, + decoder_input_ids=decoder_input_ids, + max_length=model.decoder.config.max_position_embeddings, + early_stopping=True, + pad_token_id=processor.tokenizer.pad_token_id, + eos_token_id=processor.tokenizer.eos_token_id, + use_cache=True, + num_beams=1, + bad_words_ids=[[processor.tokenizer.unk_token_id]], + output_scores=True, + return_dict_in_generate=True, + ) + + sequence = processor.batch_decode(outputs.sequences)[0] + sequence = sequence.replace(processor.tokenizer.eos_token, "").replace(processor.tokenizer.pad_token, "") + sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token + + # verify generated sequence + # fmt: off + expected_sequence = " CINNAMON SUGAR 17,000 1 x 17,000 17,000 17,000 20,000 3,000" # noqa: E231 + # fmt: on + self.assertEqual(sequence, expected_sequence) + + # verify scores + self.assertEqual(len(outputs.scores), 43) + self.assertTrue( + torch.allclose( + outputs.scores[0][0, :3], torch.tensor([-27.4344, -3.2686, -19.3524], device=torch_device), atol=1e-4 + ) + ) + + @slow + def test_inference_rvlcdip(self): + processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-rvlcdip") + model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-finetuned-rvlcdip").to( + torch_device + ) + + dataset = load_dataset("hf-internal-testing/example-documents", split="test") + image = dataset[1]["image"] + + pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(torch_device) + + # step 1: single forward pass + decoder_input_ids = processor.tokenizer( + "", add_special_tokens=False, return_tensors="pt" + ).input_ids.to(torch_device) + with torch.no_grad(): + outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids) + logits = outputs.logits + + # verify the logits + expected_shape = torch.Size((1, 1, model.decoder.config.vocab_size)) + self.assertEqual(outputs.logits.shape, expected_shape) + + expected_slice = torch.tensor([-17.6490, -4.8381, -15.7577], device=torch_device) + self.assertTrue(torch.allclose(logits[0, 0, :3], expected_slice, atol=1e-4)) + + # step 2: generation + task_prompt = "" + decoder_input_ids = processor.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt").input_ids + decoder_input_ids = decoder_input_ids.to(torch_device) + + outputs = model.generate( + pixel_values, + decoder_input_ids=decoder_input_ids, + max_length=model.decoder.config.max_position_embeddings, + early_stopping=True, + pad_token_id=processor.tokenizer.pad_token_id, + eos_token_id=processor.tokenizer.eos_token_id, + use_cache=True, + num_beams=1, + bad_words_ids=[[processor.tokenizer.unk_token_id]], + output_scores=True, + return_dict_in_generate=True, + ) + + sequence = processor.batch_decode(outputs.sequences)[0] + sequence = sequence.replace(processor.tokenizer.eos_token, "").replace(processor.tokenizer.pad_token, "") + sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token + + # verify generated sequence + self.assertEqual(sequence, "") + + # verify scores + self.assertEqual(len(outputs.scores), 4) + self.assertTrue( + torch.allclose( + outputs.scores[0][0, :3], torch.tensor([-17.6490, -4.8381, -15.7577], device=torch_device), atol=1e-4 + ) + ) diff --git a/utils/check_copies.py b/utils/check_copies.py index e2e0e1a53e4332..7d571736544687 100644 --- a/utils/check_copies.py +++ b/utils/check_copies.py @@ -471,6 +471,7 @@ def check_model_list_copy(overwrite=False, max_per_line=119): "Data2VecAudio": "Data2Vec", "Data2VecText": "Data2Vec", "Data2VecVision": "Data2Vec", + "DonutSwin": "Donut", "Marian": "MarianMT", "OpenAI GPT-2": "GPT-2", "OpenAI GPT": "GPT", diff --git a/utils/check_repo.py b/utils/check_repo.py index d2271e87ebf178..254467113d6cb4 100644 --- a/utils/check_repo.py +++ b/utils/check_repo.py @@ -206,6 +206,7 @@ ("data2vec-text", "data2vec"), ("data2vec-audio", "data2vec"), ("data2vec-vision", "data2vec"), + ("donut-swin", "donut"), ] ) diff --git a/utils/documentation_tests.txt b/utils/documentation_tests.txt index 1941a7343a6bc9..0edda8ae5a4c3c 100644 --- a/utils/documentation_tests.txt +++ b/utils/documentation_tests.txt @@ -8,6 +8,7 @@ docs/source/en/model_doc/t5.mdx docs/source/en/model_doc/t5v1.1.mdx docs/source/en/model_doc/byt5.mdx docs/source/en/model_doc/tapex.mdx +docs/source/en/model_doc/donut.mdx docs/source/en/model_doc/encoder-decoder.mdx src/transformers/generation_utils.py src/transformers/models/albert/modeling_albert.py From 153d1361c7dcc91c7735cae73e1f594cfcab3e21 Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Fri, 12 Aug 2022 18:52:49 +0200 Subject: [PATCH 094/539] Fix URLs (#18604) Co-authored-by: Niels Rogge --- docs/source/en/model_doc/donut.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/en/model_doc/donut.mdx b/docs/source/en/model_doc/donut.mdx index 9c9973be022e7c..7f692f113cb5a3 100644 --- a/docs/source/en/model_doc/donut.mdx +++ b/docs/source/en/model_doc/donut.mdx @@ -33,7 +33,7 @@ This model was contributed by [nielsr](https://huggingface.co/nielsr). The origi Tips: - The quickest way to get started with Donut is by checking the [tutorial - notebooks](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/donut), which show how to use the model + notebooks](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/Donut), which show how to use the model at inference time as well as fine-tuning on custom data. - Donut is always used within the [VisionEncoderDecoder](vision-encoder-decoder) framework. @@ -188,7 +188,7 @@ See the [model hub](https://huggingface.co/models?filter=donut) to look for Donu ## Training -We refer to the [tutorial notebooks](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/donut). +We refer to the [tutorial notebooks](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/Donut). ## DonutSwinConfig From 56ef0ba44765162f830873c140bd40bdc975cc34 Mon Sep 17 00:00:00 2001 From: Niklas Muennighoff Date: Fri, 12 Aug 2022 19:36:18 +0200 Subject: [PATCH 095/539] Update BLOOM parameter counts (#18531) * Update BLOOM parameter counts * Update BLOOM parameter counts --- docs/source/en/model_doc/bloom.mdx | 10 +++--- .../models/bloom/configuration_bloom.py | 10 +++--- .../models/bloom/modeling_bloom.py | 12 +++---- .../models/bloom/tokenization_bloom_fast.py | 10 +++--- tests/models/bloom/test_modeling_bloom.py | 34 +++++++++---------- tests/onnx/test_onnx_v2.py | 2 +- 6 files changed, 39 insertions(+), 39 deletions(-) diff --git a/docs/source/en/model_doc/bloom.mdx b/docs/source/en/model_doc/bloom.mdx index 79a45bd7bf1d48..cf415603d0fe8b 100644 --- a/docs/source/en/model_doc/bloom.mdx +++ b/docs/source/en/model_doc/bloom.mdx @@ -18,11 +18,11 @@ The BLOOM model has been proposed with its various versions through the [BigScie The architecture of BLOOM is essentially similar to GPT3 (auto-regressive model for next token prediction), but has been trained on 46 different languages and 13 programming languages. Several smaller versions of the models have been trained on the same dataset. BLOOM is available in the following versions: -- [bloom-350m](https://huggingface.co/bigscience/bloom-350m) -- [bloom-760m](https://huggingface.co/bigscience/bloom-760m) -- [bloom-1b3](https://huggingface.co/bigscience/bloom-1b3) -- [bloom-2b5](https://huggingface.co/bigscience/bloom-2b5) -- [bloom-6b3](https://huggingface.co/bigscience/bloom-6b3) +- [bloom-560m](https://huggingface.co/bigscience/bloom-560m) +- [bloom-1b1](https://huggingface.co/bigscience/bloom-1b1) +- [bloom-1b7](https://huggingface.co/bigscience/bloom-1b7) +- [bloom-3b](https://huggingface.co/bigscience/bloom-3b) +- [bloom-7b1](https://huggingface.co/bigscience/bloom-7b1) - [bloom](https://huggingface.co/bigscience/bloom) (176B parameters) diff --git a/src/transformers/models/bloom/configuration_bloom.py b/src/transformers/models/bloom/configuration_bloom.py index a33a6339b14e99..10acdcbc68e154 100644 --- a/src/transformers/models/bloom/configuration_bloom.py +++ b/src/transformers/models/bloom/configuration_bloom.py @@ -31,11 +31,11 @@ BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = { "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json", - "bigscience/bloom-350m": "https://huggingface.co/bigscience/bloom-350m/blob/main/config.json", - "bigscience/bloom-760m": "https://huggingface.co/bigscience/bloom-760m/blob/main/config.json", - "bigscience/bloom-1b3": "https://huggingface.co/bigscience/bloom-1b3/blob/main/config.json", - "bigscience/bloom-2b5": "https://huggingface.co/bigscience/bloom-2b5/blob/main/config.json", - "bigscience/bloom-6b3": "https://huggingface.co/bigscience/bloom-6b3/blob/main/config.json", + "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json", + "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json", + "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json", + "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json", + "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json", } diff --git a/src/transformers/models/bloom/modeling_bloom.py b/src/transformers/models/bloom/modeling_bloom.py index a33054a3835113..21eaded45b0c76 100644 --- a/src/transformers/models/bloom/modeling_bloom.py +++ b/src/transformers/models/bloom/modeling_bloom.py @@ -38,17 +38,17 @@ logger = logging.get_logger(__name__) -_CHECKPOINT_FOR_DOC = "bigscience/bloom-350m" +_CHECKPOINT_FOR_DOC = "bigscience/bloom-560m" _CONFIG_FOR_DOC = "BloomConfig" _TOKENIZER_FOR_DOC = "BloomTokenizerFast" BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST = [ "bigscience/bigscience-small-testing", - "bigscience/bloom-350m", - "bigscience/bloom-760m", - "bigscience/bloom-1b3", - "bigscience/bloom-2b5", - "bigscience/bloom-6b3", + "bigscience/bloom-560m", + "bigscience/bloom-1b1", + "bigscience/bloom-1b7", + "bigscience/bloom-3b", + "bigscience/bloom-7b1", "bigscience/bloom", ] diff --git a/src/transformers/models/bloom/tokenization_bloom_fast.py b/src/transformers/models/bloom/tokenization_bloom_fast.py index 7c5f9b24072df7..1d6f405039a80b 100644 --- a/src/transformers/models/bloom/tokenization_bloom_fast.py +++ b/src/transformers/models/bloom/tokenization_bloom_fast.py @@ -36,11 +36,11 @@ PRETRAINED_VOCAB_FILES_MAP = { "tokenizer_file": { "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json", - "bigscience/bloom-350m": "https://huggingface.co/bigscience/bloom-350m/blob/main/tokenizer.json", - "bigscience/bloom-760m": "https://huggingface.co/bigscience/bloom-760m/blob/main/tokenizer.json", - "bigscience/bloom-1b3": "https://huggingface.co/bigscience/bloom-1b3/blob/main/tokenizer.json", - "bigscience/bloom-2b5": "https://huggingface.co/bigscience/bloom-2b5/blob/main/tokenizer.json", - "bigscience/bloom-6b3": "https://huggingface.co/bigscience/bloom-2b5/blob/main/tokenizer.json", + "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json", + "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json", + "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json", + "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json", + "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json", "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json", }, } diff --git a/tests/models/bloom/test_modeling_bloom.py b/tests/models/bloom/test_modeling_bloom.py index 4570cb767326c0..12f66b63a837a3 100644 --- a/tests/models/bloom/test_modeling_bloom.py +++ b/tests/models/bloom/test_modeling_bloom.py @@ -379,27 +379,27 @@ def test_model_from_pretrained(self): def test_simple_generation(self): # This test is a bit flaky. For some GPU architectures, pytorch sets by default allow_fp16_reduced_precision_reduction = True and some operations # do not give the same results under this configuration, especially torch.baddmm and torch.bmm. https://pytorch.org/docs/stable/notes/numerical_accuracy.html#fp16-on-mi200 - # As we leave the default value (True) for allow_fp16_reduced_precision_reduction , the tests failed when running in half-precision with smaller models (350m) + # As we leave the default value (True) for allow_fp16_reduced_precision_reduction , the tests failed when running in half-precision with smaller models (560m) # Please see: https://pytorch.org/docs/stable/notes/cuda.html#reduced-precision-reduction-in-fp16-gemms # This discrepancy is observed only when using small models and seems to be stable for larger models. # Our conclusion is that these operations are flaky for small inputs but seems to be stable for larger inputs (for the functions `baddmm` and `bmm`), and therefore for larger models. # Here is a summary of an ablation study of our observations # EXPECTED_OUTPUT = "I enjoy walking with my cute dog, and I love to watch the kids play. I am a very active person, and I am a very good listener. I am a very good person, and I am a very good person. I am a" - # 350m + allow_fp16_reduced_precision_reduction = False + torch.bmm ==> PASS - # 350m + allow_fp16_reduced_precision_reduction = False + torch.baddm ==> PASS - # 350m + allow_fp16_reduced_precision_reduction = True + torch.baddm ==> PASS - # 350m + allow_fp16_reduced_precision_reduction = True + torch.bmm ==> FAIL + # 560m + allow_fp16_reduced_precision_reduction = False + torch.bmm ==> PASS + # 560m + allow_fp16_reduced_precision_reduction = False + torch.baddm ==> PASS + # 560m + allow_fp16_reduced_precision_reduction = True + torch.baddm ==> PASS + # 560m + allow_fp16_reduced_precision_reduction = True + torch.bmm ==> FAIL # EXPECTED_OUTPUT = "I enjoy walking with my cute dog, but I also enjoy hiking, biking, and swimming. I love to cook and bake. I love to cook and bake. I love to cook and bake. I love to cook and bake. I love" - # >=760m + allow_fp16_reduced_precision_reduction = True + torch.baddm ==> PASS (for use_cache=True and use_cache=False) - # >=760m + allow_fp16_reduced_precision_reduction = True + torch.bmm ==> PASS - # >=760m + allow_fp16_reduced_precision_reduction = False + torch.bmm ==> PASS + # >=1b1 + allow_fp16_reduced_precision_reduction = True + torch.baddm ==> PASS (for use_cache=True and use_cache=False) + # >=1b1 + allow_fp16_reduced_precision_reduction = True + torch.bmm ==> PASS + # >=1b1 + allow_fp16_reduced_precision_reduction = False + torch.bmm ==> PASS - path_350m = "bigscience/bloom-350m" - model = BloomForCausalLM.from_pretrained(path_350m, use_cache=True, revision="gs555750").cuda() + path_560m = "bigscience/bloom-560m" + model = BloomForCausalLM.from_pretrained(path_560m, use_cache=True, revision="gs555750").cuda() model = model.eval() - tokenizer = BloomTokenizerFast.from_pretrained(path_350m) + tokenizer = BloomTokenizerFast.from_pretrained(path_560m) input_sentence = "I enjoy walking with my cute dog" # This output has been obtained using fp32 model on the huggingface DGX workstation - NVIDIA A100 GPU @@ -416,10 +416,10 @@ def test_simple_generation(self): @slow @require_torch_gpu def test_batch_generation(self): - path_350m = "bigscience/bloom-350m" - model = BloomForCausalLM.from_pretrained(path_350m, use_cache=True, revision="gs555750").cuda() + path_560m = "bigscience/bloom-560m" + model = BloomForCausalLM.from_pretrained(path_560m, use_cache=True, revision="gs555750").cuda() model = model.eval() - tokenizer = BloomTokenizerFast.from_pretrained(path_350m, padding_side="left") + tokenizer = BloomTokenizerFast.from_pretrained(path_560m, padding_side="left") input_sentence = ["I enjoy walking with my cute dog", "I enjoy walking with my cute dog"] @@ -437,10 +437,10 @@ def test_batch_generation(self): @require_torch_gpu def test_batch_generation_padd(self): - path_350m = "bigscience/bloom-350m" - model = BloomForCausalLM.from_pretrained(path_350m, use_cache=True, revision="gs555750").cuda() + path_560m = "bigscience/bloom-560m" + model = BloomForCausalLM.from_pretrained(path_560m, use_cache=True, revision="gs555750").cuda() model = model.eval() - tokenizer = BloomTokenizerFast.from_pretrained(path_350m, padding_side="left") + tokenizer = BloomTokenizerFast.from_pretrained(path_560m, padding_side="left") input_sentence = ["I enjoy walking with my cute dog", "Hello my name is"] input_sentence_without_pad = "Hello my name is" diff --git a/tests/onnx/test_onnx_v2.py b/tests/onnx/test_onnx_v2.py index 5634abc7706856..79eff60cefed63 100644 --- a/tests/onnx/test_onnx_v2.py +++ b/tests/onnx/test_onnx_v2.py @@ -215,7 +215,7 @@ def test_values_override(self): } PYTORCH_EXPORT_WITH_PAST_MODELS = { - ("bloom", "bigscience/bloom-350m"), + ("bloom", "bigscience/bloom-560m"), ("gpt2", "gpt2"), ("gpt-neo", "EleutherAI/gpt-neo-125M"), } From 37c599184396206e84df6155ed3b80bd3a2ce815 Mon Sep 17 00:00:00 2001 From: Stas Bekman Date: Fri, 12 Aug 2022 10:49:59 -0700 Subject: [PATCH 096/539] [doc] fix anchors (#18591) the manual anchors end up being duplicated with automatically added anchors and no longer work. --- docs/source/en/glossary.mdx | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/docs/source/en/glossary.mdx b/docs/source/en/glossary.mdx index b6cb2259d67da7..a61eb86eaab48d 100644 --- a/docs/source/en/glossary.mdx +++ b/docs/source/en/glossary.mdx @@ -44,7 +44,7 @@ specific language governing permissions and limitations under the License. Every model is different yet bears similarities with the others. Therefore most models use the same inputs, which are detailed here alongside usage examples. - + ### Input IDs @@ -113,7 +113,7 @@ we will see because this is the way a [`BertModel`] is going to expect its inputs. - + ### Attention mask @@ -171,7 +171,7 @@ in the dictionary returned by the tokenizer under the key "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ``` - + ### Token Type IDs @@ -224,7 +224,7 @@ second sequence, corresponding to the "question", has all its tokens represented Some models, like [`XLNetModel`] use an additional token represented by a `2`. - + ### Position IDs @@ -238,7 +238,7 @@ absolute positional embeddings. Absolute positional embeddings are selected in the range `[0, config.max_position_embeddings - 1]`. Some models use other types of positional embeddings, such as sinusoidal position embeddings or relative position embeddings. - + ### Labels @@ -266,7 +266,7 @@ These labels are different according to the model head, for example: The base models (e.g., [`BertModel`]) do not accept labels, as these are the base transformer models, simply outputting features. - + ### Decoder input IDs @@ -279,7 +279,6 @@ such models, passing the `labels` is the preferred way to handle training. Please check each model's docs to see how they handle these input IDs for sequence to sequence training. - ### Feed Forward Chunking From b3ff7c680cf195d33013483cc98f7156de8bf287 Mon Sep 17 00:00:00 2001 From: Stas Bekman Date: Fri, 12 Aug 2022 10:50:52 -0700 Subject: [PATCH 097/539] [fsmt] deal with -100 indices in decoder ids (#18592) * [fsmt] deal with -100 indices in decoder ids Fixes: https://github.com/huggingface/transformers/issues/17945 decoder ids get the default index -100, which breaks the model - like t5 and many other models add a fix to replace -100 with the correct pad index. For some reason this use case hasn't been used with this model until recently - so this issue was there since the beginning it seems. Any suggestions to how to add a simple test here? or perhaps we have something similar already? user's script is quite massive. * style --- src/transformers/models/fsmt/modeling_fsmt.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/transformers/models/fsmt/modeling_fsmt.py b/src/transformers/models/fsmt/modeling_fsmt.py index f469266d7454f6..8270a70e9ee927 100644 --- a/src/transformers/models/fsmt/modeling_fsmt.py +++ b/src/transformers/models/fsmt/modeling_fsmt.py @@ -372,6 +372,10 @@ def _check_shapes(shape_1, shape2): def shift_tokens_right(input_ids, pad_token_id): """Shift input ids one token to the right, and wrap the last non pad token (usually ).""" + + # replace possible -100 values in labels by `pad_token_id` + input_ids.masked_fill_(input_ids == -100, pad_token_id) + prev_output_tokens = input_ids.clone() index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1) prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze() From 1ccd2515ed6d7da4ec46fe94aedbd8a86a2cde8e Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Fri, 12 Aug 2022 20:04:38 +0200 Subject: [PATCH 098/539] small change (#18584) --- tests/trainer/test_trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index 9cdb02468b3034..f48265ffa58168 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -2398,7 +2398,7 @@ def test_bnb_adam8bit_no_bnb(self): # Pretend that bnb does not exist, even if installed. By setting bnb to None, importing # bnb will fail even if bnb is installed. - with patch.dict("sys.modules", {"bnb.optim": None}): + with patch.dict("sys.modules", {"bitsandbytes.optim": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) From d6eeb871706db0d64ab9ffd79f9545d95286b536 Mon Sep 17 00:00:00 2001 From: Karim Foda <35491698+KMFODA@users.noreply.github.com> Date: Sun, 14 Aug 2022 17:27:13 +0200 Subject: [PATCH 099/539] Flax Remat for LongT5 (#17994) * [Flax] Add remat (gradient checkpointing) * fix variable naming in test * flip: checkpoint using a method * fix naming * fix class naming * apply PVP's suggestions from code review * add gradient_checkpointing to examples * Add gradient_checkpointing to run_mlm_flax * Add remat to longt5 * Add gradient checkpointing test longt5 * Fix args errors * Fix remaining tests * Make fixup & quality fixes * replace kwargs * remove unecessary kwargs * Make fixup changes * revert long_t5_flax changes * Remove return_dict and copy to LongT5 * Remove test_gradient_checkpointing Co-authored-by: sanchit-gandhi --- .../flax/language-modeling/run_mlm_flax.py | 9 ++ .../summarization/run_summarization_flax.py | 9 ++ .../models/longt5/modeling_flax_longt5.py | 75 +++++++++++---- .../models/t5/modeling_flax_t5.py | 95 +++++++++++++++---- 4 files changed, 149 insertions(+), 39 deletions(-) diff --git a/examples/flax/language-modeling/run_mlm_flax.py b/examples/flax/language-modeling/run_mlm_flax.py index 65f6a2285d9c34..408e09fc111cb3 100755 --- a/examples/flax/language-modeling/run_mlm_flax.py +++ b/examples/flax/language-modeling/run_mlm_flax.py @@ -107,6 +107,12 @@ class TrainingArguments: default=None, metadata={"help": "The name of the repository to keep in sync with the local `output_dir`."} ) hub_token: str = field(default=None, metadata={"help": "The token to use to push to the Model Hub."}) + gradient_checkpointing: bool = field( + default=False, + metadata={ + "help": "If True, use gradient checkpointing to save memory at the expense of slower backward pass." + }, + ) def __post_init__(self): if self.output_dir is not None: @@ -640,6 +646,9 @@ def group_texts(examples): dtype=getattr(jnp, model_args.dtype), ) + if training_args.gradient_checkpointing: + model.enable_gradient_checkpointing() + # Store some constant num_epochs = int(training_args.num_train_epochs) train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count() diff --git a/examples/flax/summarization/run_summarization_flax.py b/examples/flax/summarization/run_summarization_flax.py index c193fe0bc3745a..2813c88a3bd6fd 100644 --- a/examples/flax/summarization/run_summarization_flax.py +++ b/examples/flax/summarization/run_summarization_flax.py @@ -121,6 +121,12 @@ class TrainingArguments: default=None, metadata={"help": "The name of the repository to keep in sync with the local `output_dir`."} ) hub_token: str = field(default=None, metadata={"help": "The token to use to push to the Model Hub."}) + gradient_checkpointing: bool = field( + default=False, + metadata={ + "help": "If True, use gradient checkpointing to save memory at the expense of slower backward pass." + }, + ) def __post_init__(self): if self.output_dir is not None: @@ -535,6 +541,9 @@ def main(): dtype=getattr(jnp, model_args.dtype), ) + if training_args.gradient_checkpointing: + model.enable_gradient_checkpointing() + if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") diff --git a/src/transformers/models/longt5/modeling_flax_longt5.py b/src/transformers/models/longt5/modeling_flax_longt5.py index 766dc36888e228..224515cd12a200 100644 --- a/src/transformers/models/longt5/modeling_flax_longt5.py +++ b/src/transformers/models/longt5/modeling_flax_longt5.py @@ -25,6 +25,7 @@ import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask +from flax.linen import partitioning as nn_partitioning from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey @@ -53,6 +54,8 @@ _CONFIG_FOR_DOC = "LongT5Config" _TOKENIZER_FOR_DOC = "T5Tokenizer" +remat = nn_partitioning.remat + # Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right def shift_tokens_right(input_ids: np.array, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray: @@ -1356,7 +1359,6 @@ def __call__( encoder_attention_mask=None, encoder_decoder_position_bias=None, output_attentions=False, - return_dict=True, deterministic=True, init_cache=False, ): @@ -1377,13 +1379,31 @@ def __call__( class FlaxLongT5BlockCollection(nn.Module): config: LongT5Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation + gradient_checkpointing: bool = False def setup(self): self.causal = self.config.causal - self.blocks = [ - FlaxLongT5LayerCollection(self.config, has_relative_attention_bias=(i == 0), dtype=self.dtype, name=str(i)) - for i in range(self.config.num_layers) - ] + if self.gradient_checkpointing: + FlaxLongT5CheckpointLayer = remat(FlaxLongT5LayerCollection, static_argnums=(6, 7, 8)) + self.blocks = [ + FlaxLongT5CheckpointLayer( + self.config, + has_relative_attention_bias=(i == 0), + dtype=self.dtype, + name=str(i), + ) + for i in range(self.config.num_layers) + ] + else: + self.blocks = [ + FlaxLongT5LayerCollection( + self.config, + has_relative_attention_bias=(i == 0), + dtype=self.dtype, + name=str(i), + ) + for i in range(self.config.num_layers) + ] def __call__( self, @@ -1409,14 +1429,14 @@ def __call__( layer_outputs = layer_module( hidden_states, - attention_mask=attention_mask, - position_bias=position_bias, - encoder_hidden_states=encoder_hidden_states, - encoder_attention_mask=encoder_attention_mask, - encoder_decoder_position_bias=encoder_decoder_position_bias, - output_attentions=output_attentions, - deterministic=deterministic, - init_cache=init_cache, + attention_mask, + position_bias, + encoder_hidden_states, + encoder_attention_mask, + encoder_decoder_position_bias, + output_attentions, + deterministic, + init_cache, ) hidden_states = layer_outputs[0] @@ -1447,11 +1467,14 @@ class FlaxLongT5Stack(nn.Module): config: LongT5Config embed_tokens: nn.Embed dtype: jnp.dtype = jnp.float32 # the dtype of the computation + gradient_checkpointing: bool = False def setup(self): self.causal = self.config.causal - self.block = FlaxLongT5BlockCollection(self.config, dtype=self.dtype) + self.block = FlaxLongT5BlockCollection( + self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing + ) self.final_layer_norm = FlaxLongT5LayerNorm( self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype ) @@ -1989,6 +2012,7 @@ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs class FlaxLongT5Module(nn.Module): config: LongT5Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation + gradient_checkpointing: bool = False def _get_encoder_module(self): return self.encoder @@ -2005,12 +2029,22 @@ def setup(self): encoder_config = copy.deepcopy(self.config) encoder_config.causal = False - self.encoder = FlaxLongT5Stack(encoder_config, embed_tokens=self.shared, dtype=self.dtype) + self.encoder = FlaxLongT5Stack( + encoder_config, + embed_tokens=self.shared, + dtype=self.dtype, + gradient_checkpointing=self.gradient_checkpointing, + ) decoder_config = copy.deepcopy(self.config) decoder_config.causal = True decoder_config.num_layers = self.config.num_decoder_layers - self.decoder = FlaxLongT5Stack(decoder_config, embed_tokens=self.shared, dtype=self.dtype) + self.decoder = FlaxLongT5Stack( + decoder_config, + embed_tokens=self.shared, + dtype=self.dtype, + gradient_checkpointing=self.gradient_checkpointing, + ) def __call__( self, @@ -2104,6 +2138,7 @@ class FlaxLongT5Model(FlaxLongT5PreTrainedModel): class FlaxLongT5ForConditionalGenerationModule(nn.Module): config: LongT5Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation + gradient_checkpointing: bool = False def _get_encoder_module(self): return self.encoder @@ -2124,13 +2159,17 @@ def setup(self): encoder_config.causal = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False - self.encoder = FlaxLongT5Stack(encoder_config, self.shared, dtype=self.dtype) + self.encoder = FlaxLongT5Stack( + encoder_config, self.shared, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing + ) decoder_config = copy.deepcopy(self.config) decoder_config.causal = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = self.config.num_decoder_layers - self.decoder = FlaxLongT5Stack(decoder_config, self.shared, dtype=self.dtype) + self.decoder = FlaxLongT5Stack( + decoder_config, self.shared, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing + ) self.lm_head = nn.Dense( self.config.vocab_size, diff --git a/src/transformers/models/t5/modeling_flax_t5.py b/src/transformers/models/t5/modeling_flax_t5.py index 06ad5105429767..918a605fc4813a 100644 --- a/src/transformers/models/t5/modeling_flax_t5.py +++ b/src/transformers/models/t5/modeling_flax_t5.py @@ -25,6 +25,7 @@ import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask +from flax.linen import partitioning as nn_partitioning from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey @@ -53,6 +54,8 @@ _CONFIG_FOR_DOC = "T5Config" _TOKENIZER_FOR_DOC = "T5Tokenizer" +remat = nn_partitioning.remat + # Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right def shift_tokens_right(input_ids: np.array, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray: @@ -622,7 +625,6 @@ def __call__( encoder_attention_mask=None, encoder_decoder_position_bias=None, output_attentions=False, - return_dict=True, deterministic=True, init_cache=False, ): @@ -642,13 +644,31 @@ def __call__( class FlaxT5BlockCollection(nn.Module): config: T5Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation + gradient_checkpointing: bool = False def setup(self): self.causal = self.config.causal - self.blocks = [ - FlaxT5LayerCollection(self.config, has_relative_attention_bias=(i == 0), dtype=self.dtype, name=str(i)) - for i in range(self.config.num_layers) - ] + if self.gradient_checkpointing: + FlaxT5CheckpointLayer = remat(FlaxT5LayerCollection, static_argnums=(6, 7, 8)) + self.blocks = [ + FlaxT5CheckpointLayer( + self.config, + has_relative_attention_bias=(i == 0), + dtype=self.dtype, + name=str(i), + ) + for i in range(self.config.num_layers) + ] + else: + self.blocks = [ + FlaxT5LayerCollection( + self.config, + has_relative_attention_bias=(i == 0), + dtype=self.dtype, + name=str(i), + ) + for i in range(self.config.num_layers) + ] def __call__( self, @@ -674,14 +694,14 @@ def __call__( layer_outputs = layer_module( hidden_states, - attention_mask=attention_mask, - position_bias=position_bias, - encoder_hidden_states=encoder_hidden_states, - encoder_attention_mask=encoder_attention_mask, - encoder_decoder_position_bias=encoder_decoder_position_bias, - output_attentions=output_attentions, - deterministic=deterministic, - init_cache=init_cache, + attention_mask, + position_bias, + encoder_hidden_states, + encoder_attention_mask, + encoder_decoder_position_bias, + output_attentions, + deterministic, + init_cache, ) hidden_states = layer_outputs[0] @@ -711,11 +731,14 @@ class FlaxT5Stack(nn.Module): config: T5Config embed_tokens: nn.Embed dtype: jnp.dtype = jnp.float32 # the dtype of the computation + gradient_checkpointing: bool = False def setup(self): self.causal = self.config.causal - self.block = FlaxT5BlockCollection(self.config, dtype=self.dtype) + self.block = FlaxT5BlockCollection( + self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing + ) self.final_layer_norm = FlaxT5LayerNorm( self.config.d_model, eps=self.config.layer_norm_epsilon, dtype=self.dtype ) @@ -919,11 +942,19 @@ def __init__( seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, + gradient_checkpointing: bool = False, **kwargs ): - module = self.module_class(config=config, dtype=dtype, **kwargs) + module = self.module_class(config=config, dtype=dtype, gradient_checkpointing=gradient_checkpointing, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) + def enable_gradient_checkpointing(self): + self._module = self.module_class( + config=self.config, + dtype=self.dtype, + gradient_checkpointing=True, + ) + def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") @@ -1248,6 +1279,7 @@ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, **kwargs class FlaxT5Module(nn.Module): config: T5Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation + gradient_checkpointing: bool = False def _get_encoder_module(self): return self.encoder @@ -1264,12 +1296,22 @@ def setup(self): encoder_config = copy.deepcopy(self.config) encoder_config.causal = False - self.encoder = FlaxT5Stack(encoder_config, embed_tokens=self.shared, dtype=self.dtype) + self.encoder = FlaxT5Stack( + encoder_config, + embed_tokens=self.shared, + dtype=self.dtype, + gradient_checkpointing=self.gradient_checkpointing, + ) decoder_config = copy.deepcopy(self.config) decoder_config.causal = True decoder_config.num_layers = self.config.num_decoder_layers - self.decoder = FlaxT5Stack(decoder_config, embed_tokens=self.shared, dtype=self.dtype) + self.decoder = FlaxT5Stack( + decoder_config, + embed_tokens=self.shared, + dtype=self.dtype, + gradient_checkpointing=self.gradient_checkpointing, + ) def __call__( self, @@ -1364,6 +1406,7 @@ class FlaxT5Model(FlaxT5PreTrainedModel): class FlaxT5EncoderModule(nn.Module): config: T5Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation + gradient_checkpointing: bool = False def setup(self): self.shared = nn.Embed( @@ -1376,7 +1419,12 @@ def setup(self): encoder_config.is_decoder = False encoder_config.is_encoder_decoder = False encoder_config.causal = False - self.encoder = FlaxT5Stack(encoder_config, embed_tokens=self.shared, dtype=self.dtype) + self.encoder = FlaxT5Stack( + encoder_config, + embed_tokens=self.shared, + dtype=self.dtype, + gradient_checkpointing=self.gradient_checkpointing, + ) def __call__( self, @@ -1384,7 +1432,7 @@ def __call__( attention_mask=None, output_attentions=False, output_hidden_states=False, - return_dict=True, + return_dict: bool = True, deterministic: bool = True, ): @@ -1445,6 +1493,7 @@ def __call__( class FlaxT5ForConditionalGenerationModule(nn.Module): config: T5Config dtype: jnp.dtype = jnp.float32 # the dtype of the computation + gradient_checkpointing: bool = False def _get_encoder_module(self): return self.encoder @@ -1465,13 +1514,17 @@ def setup(self): encoder_config.causal = False encoder_config.use_cache = False encoder_config.is_encoder_decoder = False - self.encoder = FlaxT5Stack(encoder_config, self.shared, dtype=self.dtype) + self.encoder = FlaxT5Stack( + encoder_config, self.shared, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing + ) decoder_config = copy.deepcopy(self.config) decoder_config.causal = True decoder_config.is_encoder_decoder = False decoder_config.num_layers = self.config.num_decoder_layers - self.decoder = FlaxT5Stack(decoder_config, self.shared, dtype=self.dtype) + self.decoder = FlaxT5Stack( + decoder_config, self.shared, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing + ) self.lm_head = nn.Dense( self.config.vocab_size, From 9cf274685a0755c7080918951b332eba19f08601 Mon Sep 17 00:00:00 2001 From: Sourab Mangrulkar <13534540+pacman100@users.noreply.github.com> Date: Tue, 16 Aug 2022 16:34:51 +0530 Subject: [PATCH 100/539] mac m1 `mps` integration (#18598) * mac m1 `mps` integration * Update docs/source/en/main_classes/trainer.mdx Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * addressing comments * Apply suggestions from code review Co-authored-by: Dan Saattrup Nielsen <47701536+saattrupdan@users.noreply.github.com> * resolve comment Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: Dan Saattrup Nielsen <47701536+saattrupdan@users.noreply.github.com> --- docs/source/en/main_classes/trainer.mdx | 60 +++++++++++++++++++++++++ src/transformers/training_args.py | 53 +++++++++++++++++----- 2 files changed, 103 insertions(+), 10 deletions(-) diff --git a/docs/source/en/main_classes/trainer.mdx b/docs/source/en/main_classes/trainer.mdx index 44c9d1d4b01973..ab942a2c1a7de6 100644 --- a/docs/source/en/main_classes/trainer.mdx +++ b/docs/source/en/main_classes/trainer.mdx @@ -591,6 +591,66 @@ More details in this [issues](https://github.com/pytorch/pytorch/issues/75676). More details mentioned in this [issue](https://github.com/pytorch/pytorch/issues/76501) (`The original model parameters' .grads are not set, meaning that they cannot be optimized separately (which is why we cannot support multiple parameter groups)`). +### Using Trainer for accelerated PyTorch Training on Mac + +With PyTorch v1.12 release, developers and researchers can take advantage of Apple silicon GPUs for significantly faster model training. +This unlocks the ability to perform machine learning workflows like prototyping and fine-tuning locally, right on Mac. +Apple's Metal Performance Shaders (MPS) as a backend for PyTorch enables this and can be used via the new `"mps"` device. +This will map computational graphs and primitives on the MPS Graph framework and tuned kernels provided by MPS. +For more information please refer official documents [Introducing Accelerated PyTorch Training on Mac](https://pytorch.org/blog/introducing-accelerated-pytorch-training-on-mac/) +and [MPS BACKEND](https://pytorch.org/docs/stable/notes/mps.html). + + + +We strongly recommend to install PyTorch >= 1.13 (nightly version at the time of writing) on your MacOS machine. +It has major fixes related to model correctness and performance improvements for transformer based models. +Please refer to https://github.com/pytorch/pytorch/issues/82707 for more details. + + + +**Benefits of Training and Inference using Apple Silicon Chips** + +1. Enables users to train larger networks or batch sizes locally +2. Reduces data retrieval latency and provides the GPU with direct access to the full memory store due to unified memory architecture. +Therefore, improving end-to-end performance. +3. Reduces costs associated with cloud-based development or the need for additional local GPUs. + +**Pre-requisites**: To install torch with mps support, +please follow this nice medium article [GPU-Acceleration Comes to PyTorch on M1 Macs](https://medium.com/towards-data-science/gpu-acceleration-comes-to-pytorch-on-m1-macs-195c399efcc1). + +**Usage**: +User has to just pass `--use_mps_device` argument. +For example, you can run the offical Glue text classififcation task (from the root folder) using Apple Silicon GPU with below command: + +```bash +export TASK_NAME=mrpc + +python examples/pytorch/text-classification/run_glue.py \ + --model_name_or_path bert-base-cased \ + --task_name $TASK_NAME \ + --do_train \ + --do_eval \ + --max_seq_length 128 \ + --per_device_train_batch_size 32 \ + --learning_rate 2e-5 \ + --num_train_epochs 3 \ + --output_dir /tmp/$TASK_NAME/ \ + --use_mps_device \ + --overwrite_output_dir +``` + +**A few caveats to be aware of** + +1. Some PyTorch operations have not been implemented in mps and will throw an error. +One way to get around that is to set the environment variable `PYTORCH_ENABLE_MPS_FALLBACK=1`, +which will fallback to CPU for these operations. It still throws a UserWarning however. +2. Distributed setups `gloo` and `nccl` are not working with `mps` device. +This means that currently only single GPU of `mps` device type can be used. + +Finally, please, remember that, 🤗 `Trainer` only integrates MPS backend, therefore if you +have any problems or questions with regards to MPS backend usage, please, +file an issue with [PyTorch GitHub](https://github.com/pytorch/pytorch/issues). + Sections that were moved: [ DeepSpeed diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index e9a9f8f0043a79..7a23281d82ee21 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -22,6 +22,8 @@ from pathlib import Path from typing import Any, Dict, List, Optional, Union +from packaging import version + from .debug_utils import DebugOption from .trainer_utils import ( EvaluationStrategy, @@ -478,6 +480,8 @@ class TrainingArguments: are also available. See the [Ray documentation]( https://docs.ray.io/en/latest/tune/api_docs/analysis.html#ray.tune.ExperimentAnalysis.get_best_trial) for more options. + use_mps_device (`bool`, *optional*, defaults to `False`): + Whether to use Apple Silicon chip based `mps` device. """ output_dir: str = field( @@ -630,6 +634,9 @@ class TrainingArguments: }, ) no_cuda: bool = field(default=False, metadata={"help": "Do not use CUDA even when it is available"}) + use_mps_device: bool = field( + default=False, metadata={"help": "Whether to use Apple Silicon chip based `mps` device."} + ) seed: int = field(default=42, metadata={"help": "Random seed that will be set at the beginning of training."}) data_seed: Optional[int] = field(default=None, metadata={"help": "Random seed to be used with data samplers."}) jit_mode_eval: bool = field( @@ -1368,16 +1375,42 @@ def _setup_devices(self) -> "torch.device": device = torch.device("cuda", self.local_rank) self._n_gpu = 1 elif self.local_rank == -1: - # if n_gpu is > 1 we'll use nn.DataParallel. - # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` - # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will - # trigger an error that a device index is missing. Index 0 takes into account the - # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` - # will use the first GPU in that env, i.e. GPU#1 - device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") - # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at - # the default value. - self._n_gpu = torch.cuda.device_count() + if self.use_mps_device: + if not torch.backends.mps.is_available(): + if not torch.backends.mps.is_built(): + raise AssertionError( + "MPS not available because the current PyTorch install was not " + "built with MPS enabled. Please install torch version >=1.12.0 on " + "your Apple silicon Mac running macOS 12.3 or later with a native " + "version (arm64) of Python" + ) + else: + raise AssertionError( + "MPS not available because the current MacOS version is not 12.3+ " + "and/or you do not have an MPS-enabled device on this machine." + ) + else: + if not version.parse(version.parse(torch.__version__).base_version) > version.parse("1.12.0"): + warnings.warn( + "We strongly recommend to install PyTorch >= 1.13 (nightly version at the time of writing)" + " on your MacOS machine. It has major fixes related to model correctness and performance" + " improvements for transformer based models. Please refer to" + " https://github.com/pytorch/pytorch/issues/82707 for more details." + ) + device = torch.device("mps") + self._n_gpu = 1 + + else: + # if n_gpu is > 1 we'll use nn.DataParallel. + # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` + # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will + # trigger an error that a device index is missing. Index 0 takes into account the + # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` + # will use the first GPU in that env, i.e. GPU#1 + device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at + # the default value. + self._n_gpu = torch.cuda.device_count() else: # Here, we'll use torch.distributed. # Initializes the distributed backend which will take care of synchronizing nodes/GPUs From 510c2a0b32112a399ad0ded57de587bd2d7d16b0 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Tue, 16 Aug 2022 13:41:37 +0200 Subject: [PATCH 101/539] Change scheduled CIs to use torch 1.12.1 (#18644) Co-authored-by: ydshieh --- docker/transformers-all-latest-gpu/Dockerfile | 2 +- docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile | 2 +- docker/transformers-pytorch-gpu/Dockerfile | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docker/transformers-all-latest-gpu/Dockerfile b/docker/transformers-all-latest-gpu/Dockerfile index b0a55ba8be946b..c1502651f64e38 100644 --- a/docker/transformers-all-latest-gpu/Dockerfile +++ b/docker/transformers-all-latest-gpu/Dockerfile @@ -9,7 +9,7 @@ SHELL ["sh", "-lc"] # The following `ARG` are mainly used to specify the versions explicitly & directly in this docker file, and not meant # to be used as arguments for docker build (so far). -ARG PYTORCH='1.12.0' +ARG PYTORCH='1.12.1' # (not always a valid torch version) ARG INTEL_TORCH_EXT='1.11.0' # Example: `cu102`, `cu113`, etc. diff --git a/docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile b/docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile index 843e5e2df5172f..2b3292f350d71d 100644 --- a/docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile +++ b/docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile @@ -3,7 +3,7 @@ LABEL maintainer="Hugging Face" ARG DEBIAN_FRONTEND=noninteractive -ARG PYTORCH='1.12.0' +ARG PYTORCH='1.12.1' # Example: `cu102`, `cu113`, etc. ARG CUDA='cu113' diff --git a/docker/transformers-pytorch-gpu/Dockerfile b/docker/transformers-pytorch-gpu/Dockerfile index d7bb96e84ef69f..668bec3e715d86 100644 --- a/docker/transformers-pytorch-gpu/Dockerfile +++ b/docker/transformers-pytorch-gpu/Dockerfile @@ -12,7 +12,7 @@ RUN git clone https://github.com/huggingface/transformers && cd transformers && RUN python3 -m pip install --no-cache-dir -e ./transformers[dev-torch,testing] # If set to nothing, will install the latest version -ARG PYTORCH='1.12.0' +ARG PYTORCH='1.12.1' ARG TORCH_VISION='' ARG TORCH_AUDIO='' From 81ab11124f24d21c683c14e0413fe5de86c6bc93 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Tue, 16 Aug 2022 13:53:47 +0200 Subject: [PATCH 102/539] Add checks for some workflow jobs (#18583) Co-authored-by: ydshieh --- .github/workflows/self-push.yml | 34 +++++++++++++++-- utils/notification_service.py | 67 ++++++++++++++++++++++++--------- 2 files changed, 80 insertions(+), 21 deletions(-) diff --git a/.github/workflows/self-push.yml b/.github/workflows/self-push.yml index bb397bc8574829..d0efae8b479844 100644 --- a/.github/workflows/self-push.yml +++ b/.github/workflows/self-push.yml @@ -111,9 +111,24 @@ jobs: echo "::set-output name=matrix::$keys" echo "::set-output name=test_map::$test_map" + run_check_runners: + name: Check Runners + needs: setup + strategy: + matrix: + machine_type: [single-gpu, multi-gpu] + runs-on: [self-hosted, docker-gpu, '${{ matrix.machine_type }}'] + container: + image: huggingface/transformers-all-latest-gpu + options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ + steps: + - name: NVIDIA-SMI + run: | + nvidia-smi + run_tests_single_gpu: name: Model tests - needs: setup + needs: [setup, run_check_runners] # `dummy` means there is no test to run if: contains(fromJson(needs.setup.outputs.matrix), 'dummy') != true strategy: @@ -198,7 +213,7 @@ jobs: run_tests_multi_gpu: name: Model tests - needs: setup + needs: [setup, run_check_runners] # `dummy` means there is no test to run if: contains(fromJson(needs.setup.outputs.matrix), 'dummy') != true strategy: @@ -285,7 +300,7 @@ jobs: run_tests_torch_cuda_extensions_single_gpu: name: Torch CUDA extension tests - needs: setup + needs: [setup, run_check_runners] if: contains(fromJson(needs.setup.outputs.matrix), 'deepspeed') || contains(fromJson(needs.setup.outputs.matrix), 'extended') strategy: fail-fast: false @@ -364,7 +379,7 @@ jobs: run_tests_torch_cuda_extensions_multi_gpu: name: Torch CUDA extension tests - needs: setup + needs: [setup, run_check_runners] if: contains(fromJson(needs.setup.outputs.matrix), 'deepspeed') || contains(fromJson(needs.setup.outputs.matrix), 'extended') strategy: fail-fast: false @@ -447,12 +462,20 @@ jobs: if: always() needs: [ setup, + run_check_runners, run_tests_single_gpu, run_tests_multi_gpu, run_tests_torch_cuda_extensions_single_gpu, run_tests_torch_cuda_extensions_multi_gpu ] steps: + - name: Preliminary job status + shell: bash + # For the meaning of these environment variables, see the job `Setup` + run: | + echo "Setup status: ${{ needs.setup.result }}" + echo "Runner status: ${{ needs.run_check_runners.result }}" + # Necessary to get the correct branch name and commit SHA for `workflow_run` event # We also take into account the `push` event (we might want to test some changes in a branch) - name: Prepare custom environment variables @@ -498,6 +521,9 @@ jobs: CI_TITLE_PUSH: ${{ github.event.head_commit.message }} CI_TITLE_WORKFLOW_RUN: ${{ github.event.workflow_run.head_commit.message }} CI_SHA: ${{ env.CI_SHA }} + SETUP_STATUS: ${{ needs.setup.result }} + RUNNER_STATUS: ${{ needs.run_check_runners.result }} + # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`. run: | diff --git a/utils/notification_service.py b/utils/notification_service.py index 4918b4a459ac38..9ed97236d46270 100644 --- a/utils/notification_service.py +++ b/utils/notification_service.py @@ -387,28 +387,52 @@ def payload(self) -> str: return json.dumps(blocks) @staticmethod - def error_out(): - payload = [ - { - "type": "section", - "text": { - "type": "plain_text", - "text": "There was an issue running the tests.", - }, - "accessory": { - "type": "button", - "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, - "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", - }, - } - ] + def error_out(title, ci_title="", setup_failed=False, runner_failed=False): + + blocks = [] + title_block = {"type": "header", "text": {"type": "plain_text", "text": title}} + blocks.append(title_block) + + if ci_title: + ci_title_block = {"type": "section", "text": {"type": "mrkdwn", "text": ci_title}} + blocks.append(ci_title_block) + + if setup_failed: + text = "💔 Setup job failed. Tests are not run. 😭" + elif runner_failed: + text = "💔 CI runners have problems! Tests are not run. 😭" + else: + text = "💔 There was an issue running the tests. 😭" + + error_block_1 = { + "type": "header", + "text": { + "type": "plain_text", + "text": text, + }, + } + error_block_2 = { + "type": "section", + "text": { + "type": "plain_text", + "text": "🙏 Let's fix it ASAP! 🙏", + }, + "accessory": { + "type": "button", + "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, + "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", + }, + } + blocks.extend([error_block_1, error_block_2]) + + payload = json.dumps(blocks) print("Sending the following payload") - print(json.dumps({"blocks": json.loads(payload)})) + print(json.dumps({"blocks": blocks})) client.chat_postMessage( channel=os.environ["CI_SLACK_REPORT_CHANNEL_ID"], - text="There was an issue running the tests.", + text=text, blocks=payload, ) @@ -630,6 +654,11 @@ def prepare_reports(title, header, reports, to_truncate=True): if __name__ == "__main__": + setup_status = os.environ.get("SETUP_STATUS") + runner_status = os.environ.get("RUNNER_STATUS") + setup_failed = True if setup_status is not None and setup_status != "success" else False + runner_failed = True if runner_status is not None and runner_status != "success" else False + org = "huggingface" repo = "transformers" repository_full_name = f"{org}/{repo}" @@ -689,6 +718,10 @@ def prepare_reports(title, header, reports, to_truncate=True): else: ci_title = "" + if setup_failed or runner_failed: + Message.error_out(title, ci_title, setup_failed, runner_failed) + exit(0) + arguments = sys.argv[1:][0] try: models = ast.literal_eval(arguments) From fd9aa82b07d9b844a21f18f1622de5ca104f25bd Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Tue, 16 Aug 2022 13:30:52 +0100 Subject: [PATCH 103/539] TF: Fix generation repetition penalty with XLA (#18648) --- src/transformers/generation_tf_logits_process.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/transformers/generation_tf_logits_process.py b/src/transformers/generation_tf_logits_process.py index 7b3f876212b867..f17ed04686860b 100644 --- a/src/transformers/generation_tf_logits_process.py +++ b/src/transformers/generation_tf_logits_process.py @@ -262,9 +262,11 @@ def _create_score_penalties(self, input_ids: tf.Tensor, logits: tf.Tensor) -> tf # Scatters the penalties token_penalties = tf.ones(logits.shape) + batch_size = input_ids.shape[0] + seq_len = tf.shape(input_ids)[1] # the sequence length has dynamic size, hence the dynamic shape indexable_prev_input_ids = tf.concat( ( - tf.expand_dims(tf.repeat(tf.range(input_ids.shape[0]), input_ids.shape[1]), axis=-1), + tf.expand_dims(tf.repeat(tf.range(batch_size), seq_len), axis=-1), tf.expand_dims(tf.reshape(input_ids, [-1]), axis=-1), ), axis=1, From a27195b1de89271be014af149462a647f120e1bd Mon Sep 17 00:00:00 2001 From: flozi00 Date: Tue, 16 Aug 2022 17:20:46 +0200 Subject: [PATCH 104/539] Update longt5.mdx (#18634) --- docs/source/en/model_doc/longt5.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/en/model_doc/longt5.mdx b/docs/source/en/model_doc/longt5.mdx index 27a1d68515847c..0e73d6c8ddff0e 100644 --- a/docs/source/en/model_doc/longt5.mdx +++ b/docs/source/en/model_doc/longt5.mdx @@ -37,7 +37,7 @@ Tips: - [`LongT5ForConditionalGeneration`] is an extension of [`T5ForConditionalGeneration`] exchanging the traditional encoder *self-attention* layer with efficient either *local* attention or *transient-global* (*tglobal*) attention. - Unlike the T5 model, LongT5 does not use a task prefix. Furthermore, it uses a different pre-training objective -inspired by the pre-training of `[PegasusForConditionalGeneration]`. +inspired by the pre-training of [`PegasusForConditionalGeneration`]. - LongT5 model is designed to work efficiently and very well on long-range *sequence-to-sequence* tasks where the input sequence exceeds commonly used 512 tokens. It is capable of handling input sequences of a length up to 16,384 tokens. - For *Local Attention*, the sparse sliding-window local attention operation allows a given token to attend only `r` From 25e651a2de64236c62f07eeb933c2ec33ac65e0d Mon Sep 17 00:00:00 2001 From: zhoutang776 <47708118+zhoutang776@users.noreply.github.com> Date: Tue, 16 Aug 2022 10:25:57 -0700 Subject: [PATCH 105/539] Update run_translation_no_trainer.py (#18637) * Update run_translation_no_trainer.py found an error in selecting `no_decay` parameters and some small modifications when the user continues to train from a checkpoint * fixs `no_decay` and `resume_step` issue 1. change `no_decay` list 2. if use continue to train their model from provided checkpoint, the `resume_step` will not be initialized properly if `args.gradient_accumulation_steps != 1` --- .../pytorch/language-modeling/run_clm_no_trainer.py | 13 ++++++++++--- .../pytorch/language-modeling/run_mlm_no_trainer.py | 11 +++++++++-- .../translation/run_translation_no_trainer.py | 13 ++++++++++--- 3 files changed, 29 insertions(+), 8 deletions(-) diff --git a/examples/pytorch/language-modeling/run_clm_no_trainer.py b/examples/pytorch/language-modeling/run_clm_no_trainer.py index 3fd67d5fbf66e4..225b88a49440cc 100755 --- a/examples/pytorch/language-modeling/run_clm_no_trainer.py +++ b/examples/pytorch/language-modeling/run_clm_no_trainer.py @@ -464,7 +464,7 @@ def group_texts(examples): # Optimizer # Split weights in two groups, one with weight decay and the other not. - no_decay = ["bias", "LayerNorm.weight"] + no_decay = ["bias", "layer_norm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], @@ -558,10 +558,15 @@ def group_texts(examples): starting_epoch = int(training_difference.replace("epoch_", "")) + 1 resume_step = None else: - resume_step = int(training_difference.replace("step_", "")) + # need to multiply `gradient_accumulation_steps` to reflect real steps + resume_step = int(training_difference.replace("step_", "")) * args.gradient_accumulation_steps starting_epoch = resume_step // len(train_dataloader) resume_step -= starting_epoch * len(train_dataloader) + # update the progress_bar if load from checkpoint + progress_bar.update(starting_epoch * num_update_steps_per_epoch) + completed_steps = starting_epoch * num_update_steps_per_epoch + for epoch in range(starting_epoch, args.num_train_epochs): model.train() if args.with_tracking: @@ -570,7 +575,9 @@ def group_texts(examples): # We need to skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == starting_epoch: if resume_step is not None and step < resume_step: - completed_steps += 1 + if step % args.gradient_accumulation_steps == 0: + progress_bar.update(1) + completed_steps += 1 continue with accelerator.accumulate(model): diff --git a/examples/pytorch/language-modeling/run_mlm_no_trainer.py b/examples/pytorch/language-modeling/run_mlm_no_trainer.py index 80dfcf9a9194e5..c5f6aad4126f5a 100755 --- a/examples/pytorch/language-modeling/run_mlm_no_trainer.py +++ b/examples/pytorch/language-modeling/run_mlm_no_trainer.py @@ -602,10 +602,15 @@ def group_texts(examples): starting_epoch = int(training_difference.replace("epoch_", "")) + 1 resume_step = None else: - resume_step = int(training_difference.replace("step_", "")) + # need to multiply `gradient_accumulation_steps` to reflect real steps + resume_step = int(training_difference.replace("step_", "")) * args.gradient_accumulation_steps starting_epoch = resume_step // len(train_dataloader) resume_step -= starting_epoch * len(train_dataloader) + # update the progress_bar if load from checkpoint + progress_bar.update(starting_epoch * num_update_steps_per_epoch) + completed_steps = starting_epoch * num_update_steps_per_epoch + for epoch in range(starting_epoch, args.num_train_epochs): model.train() if args.with_tracking: @@ -614,7 +619,9 @@ def group_texts(examples): # We need to skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == starting_epoch: if resume_step is not None and step < resume_step: - completed_steps += 1 + if step % args.gradient_accumulation_steps == 0: + progress_bar.update(1) + completed_steps += 1 continue with accelerator.accumulate(model): diff --git a/examples/pytorch/translation/run_translation_no_trainer.py b/examples/pytorch/translation/run_translation_no_trainer.py index a6b0988f63d090..34c2ad1964090f 100644 --- a/examples/pytorch/translation/run_translation_no_trainer.py +++ b/examples/pytorch/translation/run_translation_no_trainer.py @@ -510,7 +510,7 @@ def preprocess_function(examples): # Optimizer # Split weights in two groups, one with weight decay and the other not. - no_decay = ["bias", "LayerNorm.weight"] + no_decay = ["bias", "LayerNorm.weight", "layer_norm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], @@ -607,10 +607,15 @@ def postprocess_text(preds, labels): starting_epoch = int(training_difference.replace("epoch_", "")) + 1 resume_step = None else: - resume_step = int(training_difference.replace("step_", "")) + # need to multiply `gradient_accumulation_steps` to reflect real steps + resume_step = int(training_difference.replace("step_", "")) * args.gradient_accumulation_steps starting_epoch = resume_step // len(train_dataloader) resume_step -= starting_epoch * len(train_dataloader) + # update the progress_bar if load from checkpoint + progress_bar.update(starting_epoch * num_update_steps_per_epoch) + completed_steps = starting_epoch * num_update_steps_per_epoch + for epoch in range(starting_epoch, args.num_train_epochs): model.train() if args.with_tracking: @@ -619,7 +624,9 @@ def postprocess_text(preds, labels): # We need to skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == starting_epoch: if resume_step is not None and step < resume_step: - completed_steps += 1 + if step % args.gradient_accumulation_steps == 0: + progress_bar.update(1) + completed_steps += 1 continue outputs = model(**batch) loss = outputs.loss From 6d175c1129538b27230be170fc1184e8490e95ef Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Wed, 17 Aug 2022 00:48:10 +0200 Subject: [PATCH 106/539] [bnb] Minor modifications (#18631) * bnb minor modifications - refactor documentation - add troubleshooting README - add PyPi library on DockerFile * Apply suggestions from code review Co-authored-by: Stas Bekman * Apply suggestions from code review * Apply suggestions from code review * Apply suggestions from code review * put in one block - put bash instructions in one block * update readme - refactor a bit hardware requirements * change text a bit * Apply suggestions from code review Co-authored-by: Yih-Dar <2521628+ydshieh@users.noreply.github.com> * apply suggestions Co-authored-by: Yih-Dar <2521628+ydshieh@users.noreply.github.com> * add link to paper * Apply suggestions from code review Co-authored-by: Stas Bekman * Update tests/mixed_int8/README.md * Apply suggestions from code review * refactor a bit * add instructions Turing & Amperer Co-authored-by: Stas Bekman * add A6000 * clarify a bit * remove small part * Update tests/mixed_int8/README.md Co-authored-by: Stas Bekman Co-authored-by: Yih-Dar <2521628+ydshieh@users.noreply.github.com> --- docker/transformers-all-latest-gpu/Dockerfile | 2 +- docs/source/en/main_classes/model.mdx | 40 ------ docs/source/en/perf_train_gpu_one.mdx | 53 ++++++++ tests/mixed_int8/README.md | 117 +++++++++++++++--- 4 files changed, 154 insertions(+), 58 deletions(-) diff --git a/docker/transformers-all-latest-gpu/Dockerfile b/docker/transformers-all-latest-gpu/Dockerfile index c1502651f64e38..4db6f51826f02b 100644 --- a/docker/transformers-all-latest-gpu/Dockerfile +++ b/docker/transformers-all-latest-gpu/Dockerfile @@ -46,7 +46,7 @@ RUN python3 -m pip install -U "itsdangerous<2.1.0" RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate # Add bitsandbytes for mixed int8 testing -RUN python3 -m pip install -i https://test.pypi.org/simple/ bitsandbytes==0.31.5 +RUN python3 -m pip install --no-cache-dir bitsandbytes RUN python3 -m pip install --no-cache-dir decord diff --git a/docs/source/en/main_classes/model.mdx b/docs/source/en/main_classes/model.mdx index 10f81e55d74506..fd19b3db52b734 100644 --- a/docs/source/en/main_classes/model.mdx +++ b/docs/source/en/main_classes/model.mdx @@ -133,46 +133,6 @@ model = AutoModel.from_config(config) Due to Pytorch design, this functionality is only available for floating dtypes. -### `bitsandbytes` integration for Int8 mixed-precision matrix decomposition - -From the paper `GPT3.int8() : 8-bit Matrix Multiplication for Transformers at Scale`, we suport HuggingFace 🤗 integration for all models in the Hub with few lines of code. -For models trained in half-precision (aka, either `float16` or `bfloat16`) or full precision. This method aims to reduce `nn.Linear` size by 2 (if trained in half precision) or by 4 if trained in full precision, without affecting too much quality by operating on the outliers in half-precision. -This technique is useful and works well for billion scale models (>1B parameters) therefore we advice you to use it only for models of that scale. This method has been tested for 2-billion to 176-billion scale models and supports only PyTorch models. - -![HFxbitsandbytes.png](https://s3.amazonaws.com/moonup/production/uploads/1659861207959-62441d1d9fdefb55a0b7d12c.png) - -Int8 mixed-precision matrix decomposition works by separating a matrix multiplication into two streams: (1) and systematic feature outlier stream matrix multiplied in fp16 (0.01%), (2) a regular stream of int8 matrix multiplication (99.9%). With this method, int8 inference with no predictive degradation is possible for very large models (>=176B parameters). -Values are usually normally distributed, that is, most values are in the range [-3.5, 3.5], but there are some exceptional systematic outliers that are very differently distributed for large models. These outliers are often in the interval [-60, -6] or [6, 60]. Int8 quantization works well for values of magnitude ~5, but beyond that, there is a significant performance penalty. A good default threshold is 6, but a lower threshold might be needed for more unstable models (small models, fine-tuning). - -Note also that you would require a GPU to run mixed-8bit models as the kernels has been compiled for GPUs only. Make sure that you have enough GPU RAM to store the quarter (or half if your model is natively in half precision) of the model before using this feature. - -Below are some notes to help you use this module, or follow this demo on Google colab: [![Open In Google Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1qOjXfQIAULfKvZqwCen8-MoWKGdSatZ4?usp=sharing) - -#### Requirements - -- Make sure you run that on a NVIDIA GPU that supports 8-bit tensor cores (Turing or Ampere GPUs - e.g. T4, RTX20s RTX30s, A40-A100). Note that previous generations of NVIDIA GPUs do not support 8-bit tensor cores. -- Install the correct version of `bitsandbytes` by running: -`pip install -i https://test.pypi.org/simple/ bitsandbytes` -- Install `accelerate`: -`pip install accelerate` - -#### Running mixed-int8 models - -After carefully installing the required libraries, the way to load your mixed 8-bit model is as follows: -```py -model_name = "bigscience/bloom-2b5" -model_8bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_8bit=True) -``` -The implementation supports multi-GPU setup thanks to `accelerate` as backend. If you want to control the GPU memory you want to allocate for each GPU, you can use the `max_memory` argument as follows: -(If allocating `1GB` into GPU-0 and `2GB` into GPU-1, you can use `max_memory={0:"1GB", 1:"2GB"}`) -```py -max_memory_mapping = {0: "1GB", 1: "2GB"} -model_name = "bigscience/bloom-3b" -model_8bit = AutoModelForCausalLM.from_pretrained( - model_name, device_map="auto", load_in_8bit=True, max_memory=max_memory_mapping -) -``` - ## ModuleUtilsMixin diff --git a/docs/source/en/perf_train_gpu_one.mdx b/docs/source/en/perf_train_gpu_one.mdx index 56cd6c6f10e333..32748186a42fdf 100644 --- a/docs/source/en/perf_train_gpu_one.mdx +++ b/docs/source/en/perf_train_gpu_one.mdx @@ -733,3 +733,56 @@ This feature involves 3 different libraries. To install them, please follow the - [Torchdynamo installation](https://github.com/pytorch/torchdynamo#requirements-and-setup) - [Functorch installation](https://github.com/pytorch/functorch#install) - [Torch-TensorRT(FX) installation](https://github.com/pytorch/TensorRT/blob/master/docsrc/tutorials/getting_started_with_fx_path.rst#installation) + +## `bitsandbytes` integration for Int8 mixed-precision matrix decomposition + +From the paper [`LLM.int8() : 8-bit Matrix Multiplication for Transformers at Scale`](https://arxiv.org/abs/2208.07339), we support HuggingFace integration for all models in the Hub with a few lines of code. +The method reduce `nn.Linear` size by 2 for `float16` and `bfloat16` weights and by 4 for `float32` weights, with close to no impact to the quality by operating on the outliers in half-precision. + +![HFxbitsandbytes.png](https://s3.amazonaws.com/moonup/production/uploads/1659861207959-62441d1d9fdefb55a0b7d12c.png) + +Int8 mixed-precision matrix decomposition works by separating a matrix multiplication into two streams: (1) a systematic feature outlier stream matrix multiplied in fp16 (0.01%), (2) a regular stream of int8 matrix multiplication (99.9%). With this method, int8 inference with no predictive degradation is possible for very large models. +For more details regarding the method, check out the [paper](https://arxiv.org/abs/2208.07339) or our [blogpost about the integration](https://huggingface.co/blog/hf-bitsandbytes-integration). + +![MixedInt8.gif](https://s3.amazonaws.com/moonup/production/uploads/1660567469965-62441d1d9fdefb55a0b7d12c.gif) + +Note, that you would require a GPU to run mixed-8bit models as the kernels have been compiled for GPUs only. Make sure that you have enough GPU memory to store the quarter (or half if your model weights are in half precision) of the model before using this feature. +Below are some notes to help you use this module, or follow the demos on [Google colab](#colab-demos). + +### Requirements + +- Make sure you run that on NVIDIA GPUs that support 8-bit tensor cores (Turing, Ampere or newer architectures - e.g. T4, RTX20s RTX30s, A40-A100). +- Install the correct version of `bitsandbytes` by running: +`pip install bitsandbytes>=0.31.5` +- Install `accelerate` +`pip install accelerate>=0.12.0` + +### Running mixed-int8 models + +After installing the required libraries, the way to load your mixed 8-bit model is as follows: +```py +model_name = "bigscience/bloom-2b5" +model_8bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_8bit=True) +``` +The current implementation supports a multi-GPU setup when using `accelerate`. If you want to control the GPU memory you want to allocate for each GPU use the `max_memory` argument as follows: + +```py +max_memory_mapping = {0: "1GB", 1: "2GB"} +model_name = "bigscience/bloom-3b" +model_8bit = AutoModelForCausalLM.from_pretrained( + model_name, device_map="auto", load_in_8bit=True, max_memory=max_memory_mapping +) +``` + +In this example, the first GPU will use 1GB of memory and the second 2GB. + +### Colab demos + +With this method you can infer on models that were not possible to infer on a Google Colab before. +Check out the demo for running T5-11b (42GB in fp32)! Using 8-bit quantization on Google Colab: + +[![Open In Colab: T5-11b demo](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1YORPWx4okIHXnjW7MSAidXN29mPVNT7F?usp=sharing) + +Or this demo for BLOOM-3B: + +[![Open In Colab: BLOOM-3b demo](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1qOjXfQIAULfKvZqwCen8-MoWKGdSatZ4?usp=sharing) \ No newline at end of file diff --git a/tests/mixed_int8/README.md b/tests/mixed_int8/README.md index c0173bed7a6b7a..7a0f86dbb25639 100644 --- a/tests/mixed_int8/README.md +++ b/tests/mixed_int8/README.md @@ -1,37 +1,120 @@ # Testing mixed int8 quantization +![HFxbitsandbytes.png](https://s3.amazonaws.com/moonup/production/uploads/1660567705337-62441d1d9fdefb55a0b7d12c.png) + +The following is the recipe on how to effectively debug `bitsandbytes` integration on Hugging Face `transformers`. + +## Library requirements + ++ `transformers>=4.22.0` ++ `accelerate>=0.12.0` ++ `bitsandbytes>=0.31.5`. ## Hardware requirements -I am using a setup of 2 GPUs that are NVIDIA-Tesla T4 15GB +The following instructions are tested with 2 NVIDIA-Tesla T4 GPUs. To run successfully `bitsandbytes` you would need a 8-bit core tensor supported GPU. Note that Turing, Ampere or newer architectures - e.g. T4, RTX20s RTX30s, A40-A100, A6000 should be supported. ## Virutal envs -```conda create --name int8-testing python==3.8``` -```git clone https://github.com/younesbelkada/transformers.git && git checkout integration-8bit``` -```pip install -e ".[dev]"``` -```pip install -i https://test.pypi.org/simple/ bitsandbytes``` -```pip install git+https://github.com/huggingface/accelerate.git@e0212893ea6098cc0a7a3c7a6eb286a9104214c1``` +```bash +conda create --name int8-testing python==3.8 +pip install bitsandbytes>=0.31.5 +pip install accelerate>=0.12.0 +pip install transformers>=4.23.0 +``` +if `transformers>=4.23.0` is not released yet, then use: +``` +pip install git+https://github.com/huggingface/transformers.git +``` + +## Troubleshooting +A list of common errors: -## Trobleshooting +### Torch does not correctly do the operations on GPU -```conda create --name int8-testing python==3.8``` -```pip install -i https://test.pypi.org/simple/ bitsandbytes``` -```conda install pytorch torchvision torchaudio -c pytorch``` -```git clone https://github.com/younesbelkada/transformers.git && git checkout integration-8bit``` -```pip install -e ".[dev]"``` -```pip install git+https://github.com/huggingface/accelerate.git@b52b793ea8bac108ba61192eead3cf11ca02433d``` +First check that: -### Check driver settings: +```py +import torch +vec = torch.randn(1, 2, 3).to(0) ``` -nvcc --version + +Works without any error. If not, install torch using `conda` like: + +```bash +conda create --name int8-testing python==3.8 +conda install pytorch torchvision torchaudio cudatoolkit=11.6 -c pytorch -c conda-forge +pip install bitsandbytes>=0.31.5 +pip install accelerate>=0.12.0 +pip install transformers>=4.23.0 ``` +For the latest pytorch instructions please see [this](https://pytorch.org/get-started/locally/) + +and the snippet above should work. + +### ` bitsandbytes operations are not supported under CPU!` + +This happens when some Linear weights are set to the CPU when using `accelerate`. Please check carefully `model.hf_device_map` and make sure that there is no `Linear` module that is assigned to CPU. It is fine to have the last module (usually the Lm_head) set on CPU. + +### `To use the type as a Parameter, please correct the detach() semantics defined by __torch_dispatch__() implementation.` + +Use the latest version of `accelerate` with a command such as: `pip install -U accelerate` and the problem should be solved. + +### `Parameter has no attribue .CB` + +Same solution as above. + +### `RuntimeError: CUDA error: an illegal memory access was encountered ... consider passing CUDA_LAUNCH_BLOCKING=1` + +Run your script by pre-pending `CUDA_LAUNCH_BLOCKING=1` and you should observe an error as described in the next section. + +### `CUDA illegal memory error: an illegal memory access at line...`: +Check the CUDA verisons with: +``` +nvcc --version +``` +and confirm it is the same version as the one detected by `bitsandbytes`. If not, run: ``` ls -l $CONDA_PREFIX/lib/libcudart.so ``` +or +``` +ls -l $LD_LIBRARY_PATH +``` +Check if `libcudart.so` has a correct symlink that is set. Sometimes `nvcc` detects the correct CUDA version but `bitsandbytes` doesn't. You have to make sure that the symlink that is set for the file `libcudart.so` is redirected to the correct CUDA file. + +Here is an example of a badly configured CUDA installation: + +`nvcc --version` gives: + +![Screenshot 2022-08-15 at 15.12.23.png](https://s3.amazonaws.com/moonup/production/uploads/1660569220888-62441d1d9fdefb55a0b7d12c.png) + +which means that the detected CUDA version is 11.3 but `bitsandbytes` outputs: + +![image.png](https://s3.amazonaws.com/moonup/production/uploads/1660569284243-62441d1d9fdefb55a0b7d12c.png) + +First check: + +```bash +echo $LD_LIBRARY_PATH +``` + +If this contains multiple paths separated by `:`. Then you have to make sure that the correct CUDA version is set. By doing: + +```bash +ls -l $path/libcudart.so +``` + +On each path (`$path`) separated by `:`. +If not, simply run +```bash +ls -l $LD_LIBRARY_PATH/libcudart.so +``` + +and you can see -### Recurrent bugs +![Screenshot 2022-08-15 at 15.12.33.png](https://s3.amazonaws.com/moonup/production/uploads/1660569176504-62441d1d9fdefb55a0b7d12c.png) -Sometimes you have to run a "dummy" inference pass when dealing with a multi-GPU setup. Checkout the ```test_multi_gpu_loading``` and the ```test_pipeline``` functions. \ No newline at end of file +If you see that the file is linked to the wrong CUDA version (here 10.2), find the correct location for `libcudart.so` (`find --name libcudart.so`) and replace the environment variable `LD_LIBRARY_PATH` with the one containing the correct `libcudart.so` file. \ No newline at end of file From 358478e7296cf3348e11901ddca1c25e3886b260 Mon Sep 17 00:00:00 2001 From: Stefan Schweter Date: Wed, 17 Aug 2022 09:50:57 +0200 Subject: [PATCH 107/539] Examples: add Bloom support for token classification (#18632) * examples: add Bloom support for token classification (FLAX, PyTorch and TensorFlow) * examples: remove support for Bloom in token classication (FLAX and TensorFlow currently have no support for it) --- examples/pytorch/token-classification/run_ner.py | 2 +- examples/pytorch/token-classification/run_ner_no_trainer.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/pytorch/token-classification/run_ner.py b/examples/pytorch/token-classification/run_ner.py index 9000b5006e03fa..a272f25aa417ea 100755 --- a/examples/pytorch/token-classification/run_ner.py +++ b/examples/pytorch/token-classification/run_ner.py @@ -348,7 +348,7 @@ def get_label_list(labels): ) tokenizer_name_or_path = model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path - if config.model_type in {"gpt2", "roberta"}: + if config.model_type in {"bloom", "gpt2", "roberta"}: tokenizer = AutoTokenizer.from_pretrained( tokenizer_name_or_path, cache_dir=model_args.cache_dir, diff --git a/examples/pytorch/token-classification/run_ner_no_trainer.py b/examples/pytorch/token-classification/run_ner_no_trainer.py index f5736f35c79148..937abb718e72b1 100755 --- a/examples/pytorch/token-classification/run_ner_no_trainer.py +++ b/examples/pytorch/token-classification/run_ner_no_trainer.py @@ -398,7 +398,7 @@ def get_label_list(labels): "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) - if config.model_type in {"gpt2", "roberta"}: + if config.model_type in {"bloom", "gpt2", "roberta"}: tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path, use_fast=True, add_prefix_space=True) else: tokenizer = AutoTokenizer.from_pretrained(tokenizer_name_or_path, use_fast=True) From c99e984657b64dd8f19de74405bbf13763ab4f2b Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Wed, 17 Aug 2022 10:04:49 +0200 Subject: [PATCH 108/539] Fix Yolos ONNX export test (#18606) Co-authored-by: lewtun Co-authored-by: ydshieh --- tests/onnx/test_onnx_v2.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/onnx/test_onnx_v2.py b/tests/onnx/test_onnx_v2.py index 79eff60cefed63..b3c0ffb1f371b9 100644 --- a/tests/onnx/test_onnx_v2.py +++ b/tests/onnx/test_onnx_v2.py @@ -284,6 +284,12 @@ def _onnx_export(self, test_name, name, model_name, feature, onnx_config_class_c model_class = FeaturesManager.get_model_class_for_feature(feature) config = AutoConfig.from_pretrained(model_name) model = model_class.from_config(config) + + # Dynamic axes aren't supported for YOLO-like models. This means they cannot be exported to ONNX on CUDA devices. + # See: https://github.com/ultralytics/yolov5/pull/8378 + if model.__class__.__name__.startswith("Yolos") and device != "cpu": + return + onnx_config = onnx_config_class_constructor(model.config) if is_torch_available(): From 86d0b26d6c5566506b1be55002654b48d9c19ffe Mon Sep 17 00:00:00 2001 From: Jingya HUANG <44135271+JingyaHuang@users.noreply.github.com> Date: Wed, 17 Aug 2022 15:59:43 +0200 Subject: [PATCH 109/539] Fix matmul inputs dtype (#18585) --- src/transformers/models/deberta/modeling_deberta.py | 13 ++++++------- .../models/deberta_v2/modeling_deberta_v2.py | 8 +++++--- src/transformers/models/sew_d/modeling_sew_d.py | 8 +++++--- 3 files changed, 16 insertions(+), 13 deletions(-) diff --git a/src/transformers/models/deberta/modeling_deberta.py b/src/transformers/models/deberta/modeling_deberta.py index df3d4d95cd0170..0fbb66ba8fd054 100644 --- a/src/transformers/models/deberta/modeling_deberta.py +++ b/src/transformers/models/deberta/modeling_deberta.py @@ -14,7 +14,6 @@ # limitations under the License. """ PyTorch DeBERTa model.""" -import math from collections.abc import Sequence from typing import Optional, Tuple, Union @@ -640,8 +639,8 @@ def linear(w, b, x): qkvw = [torch.cat([ws[i * 3 + k] for i in range(self.num_attention_heads)], dim=0) for k in range(3)] qkvb = [None] * 3 - q = linear(qkvw[0], qkvb[0], query_states) - k, v = [linear(qkvw[i], qkvb[i], hidden_states) for i in range(1, 3)] + q = linear(qkvw[0], qkvb[0], torch.tensor(query_states, dtype=qkvw[0].dtype)) + k, v = [linear(qkvw[i], qkvb[i], torch.tensor(hidden_states, dtype=qkvw[i].dtype)) for i in range(1, 3)] query_layer, key_layer, value_layer = [self.transpose_for_scores(x) for x in [q, k, v]] query_layer = query_layer + self.transpose_for_scores(self.q_bias[None, None, :]) @@ -650,8 +649,8 @@ def linear(w, b, x): rel_att = None # Take the dot product between "query" and "key" to get the raw attention scores. scale_factor = 1 + len(self.pos_att_type) - scale = math.sqrt(query_layer.size(-1) * scale_factor) - query_layer = query_layer / scale + scale = torch.sqrt(torch.tensor(query_layer.size(-1), dtype=torch.float) * scale_factor) + query_layer = query_layer / torch.tensor(scale, dtype=query_layer.dtype) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.relative_attention: rel_embeddings = self.pos_dropout(rel_embeddings) @@ -711,13 +710,13 @@ def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embedd if "p2c" in self.pos_att_type: pos_query_layer = self.pos_q_proj(rel_embeddings) pos_query_layer = self.transpose_for_scores(pos_query_layer) - pos_query_layer /= math.sqrt(pos_query_layer.size(-1) * scale_factor) + pos_query_layer /= torch.sqrt(torch.tensor(pos_query_layer.size(-1), dtype=torch.float) * scale_factor) if query_layer.size(-2) != key_layer.size(-2): r_pos = build_relative_position(key_layer.size(-2), key_layer.size(-2), query_layer.device) else: r_pos = relative_pos p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1) - p2c_att = torch.matmul(key_layer, pos_query_layer.transpose(-1, -2)) + p2c_att = torch.matmul(key_layer, torch.tensor(pos_query_layer.transpose(-1, -2), dtype=key_layer.dtype)) p2c_att = torch.gather( p2c_att, dim=-1, index=p2c_dynamic_expand(p2c_pos, query_layer, key_layer) ).transpose(-1, -2) diff --git a/src/transformers/models/deberta_v2/modeling_deberta_v2.py b/src/transformers/models/deberta_v2/modeling_deberta_v2.py index 3243ee108d488b..1a9252a7d30707 100644 --- a/src/transformers/models/deberta_v2/modeling_deberta_v2.py +++ b/src/transformers/models/deberta_v2/modeling_deberta_v2.py @@ -717,7 +717,9 @@ def forward( if "p2c" in self.pos_att_type: scale_factor += 1 scale = torch.sqrt(torch.tensor(query_layer.size(-1), dtype=torch.float) * scale_factor) - attention_scores = torch.bmm(query_layer, key_layer.transpose(-1, -2)) / scale + attention_scores = torch.bmm(query_layer, key_layer.transpose(-1, -2)) / torch.tensor( + scale, dtype=query_layer.dtype + ) if self.relative_attention: rel_embeddings = self.pos_dropout(rel_embeddings) rel_att = self.disentangled_attention_bias( @@ -799,7 +801,7 @@ def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_ dim=-1, index=c2p_pos.squeeze(0).expand([query_layer.size(0), query_layer.size(1), relative_pos.size(-1)]), ) - score += c2p_att / scale + score += c2p_att / torch.tensor(scale, dtype=c2p_att.dtype) # position->content if "p2c" in self.pos_att_type: @@ -822,7 +824,7 @@ def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_ dim=-1, index=p2c_pos.squeeze(0).expand([query_layer.size(0), key_layer.size(-2), key_layer.size(-2)]), ).transpose(-1, -2) - score += p2c_att / scale + score += p2c_att / torch.tensor(scale, dtype=p2c_att.dtype) return score diff --git a/src/transformers/models/sew_d/modeling_sew_d.py b/src/transformers/models/sew_d/modeling_sew_d.py index fe5836a80f36e0..bcd95139c55898 100644 --- a/src/transformers/models/sew_d/modeling_sew_d.py +++ b/src/transformers/models/sew_d/modeling_sew_d.py @@ -791,7 +791,9 @@ def forward( if "p2c" in self.pos_att_type: scale_factor += 1 scale = torch.sqrt(torch.tensor(query_layer.size(-1), dtype=torch.float) * scale_factor) - attention_scores = torch.bmm(query_layer, key_layer.transpose(-1, -2)) / scale + attention_scores = torch.bmm(query_layer, key_layer.transpose(-1, -2)) / torch.tensor( + scale, dtype=query_layer.dtype + ) if self.relative_attention: rel_embeddings = self.pos_dropout(rel_embeddings) rel_att = self.disentangled_attention_bias( @@ -873,7 +875,7 @@ def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_ dim=-1, index=c2p_pos.squeeze(0).expand([query_layer.size(0), query_layer.size(1), relative_pos.size(-1)]), ) - score += c2p_att / scale + score += c2p_att / torch.tensor(scale, dtype=c2p_att.dtype) # position->content if "p2c" in self.pos_att_type: @@ -896,7 +898,7 @@ def disentangled_attention_bias(self, query_layer, key_layer, relative_pos, rel_ dim=-1, index=p2c_pos.squeeze(0).expand([query_layer.size(0), key_layer.size(-2), key_layer.size(-2)]), ).transpose(-1, -2) - score += p2c_att / scale + score += p2c_att / torch.tensor(scale, dtype=p2c_att.dtype) return score From 49e44b216b2559e34e945d5dcdbbe2238859e29b Mon Sep 17 00:00:00 2001 From: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Date: Wed, 17 Aug 2022 19:57:07 +0100 Subject: [PATCH 110/539] Update feature extractor methods to enable type cast before normalize (#18499) * Update methods to optionally rescale This is necessary to allow for casting our images / videos to numpy arrays within the feature extractors' call. We want to do this to make sure the behaviour is as expected when flags like are False. If some transformations aren't applied, then the output type can't be unexpected e.g. a list of PIL images instead of numpy arrays. * Cast images to numpy arrays in call to enable consistent behaviour with different configs * Remove accidental clip changes * Update tests to reflect the scaling logic We write a generic function to handle rescaling of our arrays. In order for the API to be intuitive, we take some factor c and rescale the image values by that. This means, the rescaling done in normalize and to_numpy_array are now done with array * (1/255) instead of array / 255. This leads to small differences in the resulting image. When testing, this was in the order of 1e-8, and so deemed OK --- src/transformers/image_utils.py | 26 +++++++++++++++++++++----- tests/utils/test_image_utils.py | 20 ++++++++++---------- 2 files changed, 31 insertions(+), 15 deletions(-) diff --git a/src/transformers/image_utils.py b/src/transformers/image_utils.py index e5a395341c0031..437e7c5685586b 100644 --- a/src/transformers/image_utils.py +++ b/src/transformers/image_utils.py @@ -131,6 +131,13 @@ def convert_rgb(self, image): return image.convert("RGB") + def rescale(self, image: np.ndarray, scale: Union[float, int]) -> np.ndarray: + """ + Rescale a numpy image by scale amount + """ + self._ensure_format_supported(image) + return image * scale + def to_numpy_array(self, image, rescale=None, channel_first=True): """ Converts `image` to a numpy array. Optionally rescales it and puts the channel dimension as the first @@ -153,11 +160,10 @@ def to_numpy_array(self, image, rescale=None, channel_first=True): if is_torch_tensor(image): image = image.numpy() - if rescale is None: - rescale = isinstance(image.flat[0], np.integer) + rescale = isinstance(image.flat[0], np.integer) if rescale is None else rescale if rescale: - image = image.astype(np.float32) / 255.0 + image = self.rescale(image.astype(np.float32), 1 / 255.0) if channel_first and image.ndim == 3: image = image.transpose(2, 0, 1) @@ -184,7 +190,7 @@ def expand_dims(self, image): image = np.expand_dims(image, axis=0) return image - def normalize(self, image, mean, std): + def normalize(self, image, mean, std, rescale=False): """ Normalizes `image` with `mean` and `std`. Note that this will trigger a conversion of `image` to a NumPy array if it's a PIL Image. @@ -196,11 +202,21 @@ def normalize(self, image, mean, std): The mean (per channel) to use for normalization. std (`List[float]` or `np.ndarray` or `torch.Tensor`): The standard deviation (per channel) to use for normalization. + rescale (`bool`, *optional*, defaults to `False`): + Whether or not to rescale the image to be between 0 and 1. If a PIL image is provided, scaling will + happen automatically. """ self._ensure_format_supported(image) if isinstance(image, PIL.Image.Image): - image = self.to_numpy_array(image) + image = self.to_numpy_array(image, rescale=True) + # If the input image is a PIL image, it automatically gets rescaled. If it's another + # type it may need rescaling. + elif rescale: + if isinstance(image, np.ndarray): + image = self.rescale(image.astype(np.float32), 1 / 255.0) + elif is_torch_tensor(image): + image = self.rescale(image.float(), 1 / 255.0) if isinstance(image, np.ndarray): if not isinstance(mean, np.ndarray): diff --git a/tests/utils/test_image_utils.py b/tests/utils/test_image_utils.py index 6c870e3341cdf9..3c1be7102c1abc 100644 --- a/tests/utils/test_image_utils.py +++ b/tests/utils/test_image_utils.py @@ -58,13 +58,13 @@ def test_conversion_image_to_array(self): array3 = feature_extractor.to_numpy_array(image, rescale=False) self.assertTrue(array3.dtype, np.uint8) self.assertEqual(array3.shape, (3, 16, 32)) - self.assertTrue(np.array_equal(array1, array3.astype(np.float32) / 255.0)) + self.assertTrue(np.array_equal(array1, array3.astype(np.float32) * (1 / 255.0))) # Conversion with no rescale and not channel first array4 = feature_extractor.to_numpy_array(image, rescale=False, channel_first=False) self.assertTrue(array4.dtype, np.uint8) self.assertEqual(array4.shape, (16, 32, 3)) - self.assertTrue(np.array_equal(array2, array4.astype(np.float32) / 255.0)) + self.assertTrue(np.array_equal(array2, array4.astype(np.float32) * (1 / 255.0))) def test_conversion_array_to_array(self): feature_extractor = ImageFeatureExtractionMixin() @@ -74,13 +74,13 @@ def test_conversion_array_to_array(self): array1 = feature_extractor.to_numpy_array(array) self.assertTrue(array1.dtype, np.float32) self.assertEqual(array1.shape, (3, 16, 32)) - self.assertTrue(np.array_equal(array1, array.transpose(2, 0, 1).astype(np.float32) / 255.0)) + self.assertTrue(np.array_equal(array1, array.transpose(2, 0, 1).astype(np.float32) * (1 / 255.0))) # Same with no permute array2 = feature_extractor.to_numpy_array(array, channel_first=False) self.assertTrue(array2.dtype, np.float32) self.assertEqual(array2.shape, (16, 32, 3)) - self.assertTrue(np.array_equal(array2, array.astype(np.float32) / 255.0)) + self.assertTrue(np.array_equal(array2, array.astype(np.float32) * (1 / 255.0))) # Force rescale to False array3 = feature_extractor.to_numpy_array(array, rescale=False) @@ -110,13 +110,13 @@ def test_conversion_torch_to_array(self): array1 = feature_extractor.to_numpy_array(array) self.assertTrue(array1.dtype, np.float32) self.assertEqual(array1.shape, (3, 16, 32)) - self.assertTrue(np.array_equal(array1, array.transpose(2, 0, 1).astype(np.float32) / 255.0)) + self.assertTrue(np.array_equal(array1, array.transpose(2, 0, 1).astype(np.float32) * (1 / 255.0))) # Same with no permute array2 = feature_extractor.to_numpy_array(array, channel_first=False) self.assertTrue(array2.dtype, np.float32) self.assertEqual(array2.shape, (16, 32, 3)) - self.assertTrue(np.array_equal(array2, array.astype(np.float32) / 255.0)) + self.assertTrue(np.array_equal(array2, array.astype(np.float32) * (1 / 255.0))) # Force rescale to False array3 = feature_extractor.to_numpy_array(array, rescale=False) @@ -160,7 +160,7 @@ def test_conversion_array_to_image(self): self.assertTrue(np.array_equal(np.array(image2), array)) # If the array has floating type, it's rescaled by default. - image3 = feature_extractor.to_pil_image(array.astype(np.float32) / 255.0) + image3 = feature_extractor.to_pil_image(array.astype(np.float32) * (1 / 255.0)) self.assertTrue(isinstance(image3, PIL.Image.Image)) self.assertTrue(np.array_equal(np.array(image3), array)) @@ -170,7 +170,7 @@ def test_conversion_array_to_image(self): self.assertTrue(np.array_equal(np.array(image4), array)) # And with floats + channel first. - image5 = feature_extractor.to_pil_image(array.transpose(2, 0, 1).astype(np.float32) / 255.0) + image5 = feature_extractor.to_pil_image(array.transpose(2, 0, 1).astype(np.float32) * (1 / 255.0)) self.assertTrue(isinstance(image5, PIL.Image.Image)) self.assertTrue(np.array_equal(np.array(image5), array)) @@ -201,7 +201,7 @@ def test_conversion_tensor_to_image(self): self.assertTrue(np.array_equal(np.array(image4), array)) # And with floats + channel first. - image5 = feature_extractor.to_pil_image(tensor.permute(2, 0, 1).float() / 255.0) + image5 = feature_extractor.to_pil_image(tensor.permute(2, 0, 1).float() * (1 / 255.0)) self.assertTrue(isinstance(image5, PIL.Image.Image)) self.assertTrue(np.array_equal(np.array(image5), array)) @@ -316,7 +316,7 @@ def test_normalize_image(self): self.assertEqual(normalized_image.shape, (3, 16, 32)) # During the conversion rescale and channel first will be applied. - expected = array.transpose(2, 0, 1).astype(np.float32) / 255.0 + expected = array.transpose(2, 0, 1).astype(np.float32) * (1 / 255.0) np_mean = np.array(mean).astype(np.float32)[:, None, None] np_std = np.array(std).astype(np.float32)[:, None, None] expected = (expected - np_mean) / np_std From 582c537175fc76a154f6f849a7794b0c09d72201 Mon Sep 17 00:00:00 2001 From: Matt Date: Thu, 18 Aug 2022 03:09:09 -0400 Subject: [PATCH 111/539] Allow users to force TF availability (#18650) * Allow users to force TF availability * Correctly name the envvar! --- src/transformers/utils/import_utils.py | 73 ++++++++++++++------------ 1 file changed, 40 insertions(+), 33 deletions(-) diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index 37172d14fcc289..219552976a0a6c 100644 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -42,6 +42,8 @@ USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper() USE_JAX = os.environ.get("USE_FLAX", "AUTO").upper() +FORCE_TF_AVAILABLE = os.environ.get("FORCE_TF_AVAILABLE", "AUTO").upper() + _torch_version = "N/A" if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES: _torch_available = importlib.util.find_spec("torch") is not None @@ -57,40 +59,45 @@ _tf_version = "N/A" -if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES: - _tf_available = importlib.util.find_spec("tensorflow") is not None - if _tf_available: - candidates = ( - "tensorflow", - "tensorflow-cpu", - "tensorflow-gpu", - "tf-nightly", - "tf-nightly-cpu", - "tf-nightly-gpu", - "intel-tensorflow", - "intel-tensorflow-avx512", - "tensorflow-rocm", - "tensorflow-macos", - "tensorflow-aarch64", - ) - _tf_version = None - # For the metadata, we have to look for both tensorflow and tensorflow-cpu - for pkg in candidates: - try: - _tf_version = importlib_metadata.version(pkg) - break - except importlib_metadata.PackageNotFoundError: - pass - _tf_available = _tf_version is not None - if _tf_available: - if version.parse(_tf_version) < version.parse("2"): - logger.info(f"TensorFlow found but with version {_tf_version}. Transformers requires version 2 minimum.") - _tf_available = False - else: - logger.info(f"TensorFlow version {_tf_version} available.") +if FORCE_TF_AVAILABLE in ENV_VARS_TRUE_VALUES: + _tf_available = True else: - logger.info("Disabling Tensorflow because USE_TORCH is set") - _tf_available = False + if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES: + _tf_available = importlib.util.find_spec("tensorflow") is not None + if _tf_available: + candidates = ( + "tensorflow", + "tensorflow-cpu", + "tensorflow-gpu", + "tf-nightly", + "tf-nightly-cpu", + "tf-nightly-gpu", + "intel-tensorflow", + "intel-tensorflow-avx512", + "tensorflow-rocm", + "tensorflow-macos", + "tensorflow-aarch64", + ) + _tf_version = None + # For the metadata, we have to look for both tensorflow and tensorflow-cpu + for pkg in candidates: + try: + _tf_version = importlib_metadata.version(pkg) + break + except importlib_metadata.PackageNotFoundError: + pass + _tf_available = _tf_version is not None + if _tf_available: + if version.parse(_tf_version) < version.parse("2"): + logger.info( + f"TensorFlow found but with version {_tf_version}. Transformers requires version 2 minimum." + ) + _tf_available = False + else: + logger.info(f"TensorFlow version {_tf_version} available.") + else: + logger.info("Disabling Tensorflow because USE_TORCH is set") + _tf_available = False if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES: From 0ea53822f8bdc2c0c41b45bcd04fa6c031e5e700 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Thu, 18 Aug 2022 10:03:50 +0200 Subject: [PATCH 112/539] [LongT5] Correct docs long t5 (#18669) * add first generation tutorial * [LongT5 Docs] Correct docs * correct expected string * remove incorrect file --- src/transformers/models/longt5/modeling_longt5.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/transformers/models/longt5/modeling_longt5.py b/src/transformers/models/longt5/modeling_longt5.py index abd1cb778655a6..3a8fb9f66a09fc 100644 --- a/src/transformers/models/longt5/modeling_longt5.py +++ b/src/transformers/models/longt5/modeling_longt5.py @@ -1966,13 +1966,12 @@ def forward( ... ) >>> # Let's try a very long input. - >>> input_ids = tokenizer( - ... "summarize: " + 100 * "studies have shown that owning a dog is good for you ", return_tensors="pt" - ... ).input_ids # Batch size 1 + >>> inputs = tokenizer(100 * "studies have shown that owning a dog is good for you ", return_tensors="pt") + >>> input_ids = inputs.input_ids >>> outputs = model.generate(input_ids) >>> print(tokenizer.decode(outputs[0], skip_special_tokens=True)) - abstractthe aim of this article is to summarize the studies have shown that owning a dog + abstractthe aim of this article is to provide an overview of the literature on the role of dog ```""" use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict From a541d97477a8901e37e5f850f2cd707ffc82445b Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Thu, 18 Aug 2022 10:56:21 +0100 Subject: [PATCH 113/539] Generate: validate model_kwargs on FLAX (and catch typos in generate arguments) (#18653) --- src/transformers/generation_flax_utils.py | 24 ++++++++++++++++++- .../generation/test_generation_flax_utils.py | 21 ++++++++++++++++ 2 files changed, 44 insertions(+), 1 deletion(-) diff --git a/src/transformers/generation_flax_utils.py b/src/transformers/generation_flax_utils.py index fd26a605c48bac..1c052aae7bafb6 100644 --- a/src/transformers/generation_flax_utils.py +++ b/src/transformers/generation_flax_utils.py @@ -15,9 +15,10 @@ # limitations under the License. +import inspect import warnings from functools import partial -from typing import Dict, Optional +from typing import Any, Dict, Optional import numpy as np @@ -160,6 +161,24 @@ def _adapt_logits_for_beam_search(self, logits): """ return logits + def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]): + """Validates model kwargs for generation. Generate argument typos will also be caught here.""" + unused_model_args = [] + model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters) + # `kwargs` if often used to handle optional forward pass inputs like `attention_mask`. If + # `prepare_inputs_for_generation` doesn't accept `kwargs`, then a stricter check can be made ;) + if "kwargs" in model_args: + model_args |= set(inspect.signature(self.__call__).parameters) + for key, value in model_kwargs.items(): + if value is not None and key not in model_args: + unused_model_args.append(key) + + if unused_model_args: + raise ValueError( + f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the" + " generate arguments will also show up in this list)" + ) + def generate( self, input_ids: jnp.ndarray, @@ -262,6 +281,9 @@ def generate( >>> outputs = model.generate(input_ids=input_ids, max_length=20, top_k=30, do_sample=True) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ```""" + # Validate model kwargs + self._validate_model_kwargs(model_kwargs.copy()) + # set init values bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id diff --git a/tests/generation/test_generation_flax_utils.py b/tests/generation/test_generation_flax_utils.py index b7b84d8db7250e..aabab559853bb0 100644 --- a/tests/generation/test_generation_flax_utils.py +++ b/tests/generation/test_generation_flax_utils.py @@ -13,6 +13,7 @@ # limitations under the License. import random +import unittest import numpy as np @@ -26,6 +27,7 @@ import jax.numpy as jnp from jax import jit + from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12" # assumed parallelism: 8 @@ -273,3 +275,22 @@ def test_beam_search_generate_attn_mask(self): jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) + + +@require_flax +class FlaxGenerationIntegrationTests(unittest.TestCase): + def test_validate_generation_inputs(self): + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert") + model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only") + + encoder_input_str = "Hello world" + input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids + + # typos are quickly detected (the correct argument is `do_sample`) + with self.assertRaisesRegex(ValueError, "do_samples"): + model.generate(input_ids, do_samples=True) + + # arbitrary arguments that will not be used anywhere are also not accepted + with self.assertRaisesRegex(ValueError, "foo"): + fake_model_kwargs = {"foo": "bar"} + model.generate(input_ids, **fake_model_kwargs) From 2c947d29399e30e8c9a67c5709d4c54949dc1aee Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Thu, 18 Aug 2022 12:57:18 +0200 Subject: [PATCH 114/539] Ping `detectron2` for CircleCI tests (#18680) Co-authored-by: ydshieh --- .circleci/config.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 666505ab3b4389..3e5c6aaa88581a 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1003,7 +1003,10 @@ jobs: - run: pip install --upgrade pip - run: pip install .[torch,testing,vision] - run: pip install torchvision - - run: python -m pip install 'git+https://github.com/facebookresearch/detectron2.git' + # The commit `36a65a0907d90ed591479b2ebaa8b61cfa0b4ef0` in `detectron2` break things. + # See https://github.com/facebookresearch/detectron2/commit/36a65a0907d90ed591479b2ebaa8b61cfa0b4ef0#comments. + # TODO: Revert this change back once the above issue is fixed. + - run: python -m pip install 'git+https://github.com/facebookresearch/detectron2.git@5aeb252b194b93dc2879b4ac34bc51a31b5aee13' - run: sudo apt install tesseract-ocr - run: pip install pytesseract - save_cache: From 780253ce3d7b8f4ea09a92d7dfe8e3a16d549e20 Mon Sep 17 00:00:00 2001 From: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Date: Thu, 18 Aug 2022 12:56:27 +0100 Subject: [PATCH 115/539] Rename method to avoid clash with property (#18677) --- src/transformers/image_utils.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/transformers/image_utils.py b/src/transformers/image_utils.py index 437e7c5685586b..120d7b3c1bd26c 100644 --- a/src/transformers/image_utils.py +++ b/src/transformers/image_utils.py @@ -131,7 +131,7 @@ def convert_rgb(self, image): return image.convert("RGB") - def rescale(self, image: np.ndarray, scale: Union[float, int]) -> np.ndarray: + def rescale_image(self, image: np.ndarray, scale: Union[float, int]) -> np.ndarray: """ Rescale a numpy image by scale amount """ @@ -163,7 +163,7 @@ def to_numpy_array(self, image, rescale=None, channel_first=True): rescale = isinstance(image.flat[0], np.integer) if rescale is None else rescale if rescale: - image = self.rescale(image.astype(np.float32), 1 / 255.0) + image = self.rescale_image(image.astype(np.float32), 1 / 255.0) if channel_first and image.ndim == 3: image = image.transpose(2, 0, 1) @@ -214,9 +214,9 @@ def normalize(self, image, mean, std, rescale=False): # type it may need rescaling. elif rescale: if isinstance(image, np.ndarray): - image = self.rescale(image.astype(np.float32), 1 / 255.0) + image = self.rescale_image(image.astype(np.float32), 1 / 255.0) elif is_torch_tensor(image): - image = self.rescale(image.float(), 1 / 255.0) + image = self.rescale_image(image.float(), 1 / 255.0) if isinstance(image, np.ndarray): if not isinstance(mean, np.ndarray): From 76454b08c8ec09b0debeb1c94a3855cde8167d84 Mon Sep 17 00:00:00 2001 From: regisss <15324346+regisss@users.noreply.github.com> Date: Thu, 18 Aug 2022 15:13:54 +0200 Subject: [PATCH 116/539] Rename second input dimension from "sequence" to "num_channels" for CV models (#17976) --- src/transformers/models/beit/configuration_beit.py | 2 +- src/transformers/models/convnext/configuration_convnext.py | 2 +- .../models/data2vec/configuration_data2vec_vision.py | 2 +- src/transformers/models/deit/configuration_deit.py | 2 +- src/transformers/models/detr/configuration_detr.py | 4 ++-- .../models/layoutlmv3/configuration_layoutlmv3.py | 2 +- src/transformers/models/mobilevit/configuration_mobilevit.py | 2 +- src/transformers/models/resnet/configuration_resnet.py | 2 +- src/transformers/models/vit/configuration_vit.py | 2 +- tests/onnx/test_onnx_v2.py | 1 + 10 files changed, 11 insertions(+), 10 deletions(-) diff --git a/src/transformers/models/beit/configuration_beit.py b/src/transformers/models/beit/configuration_beit.py index c745f3227d64a5..092f33ad85d383 100644 --- a/src/transformers/models/beit/configuration_beit.py +++ b/src/transformers/models/beit/configuration_beit.py @@ -194,7 +194,7 @@ class BeitOnnxConfig(OnnxConfig): def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ - ("pixel_values", {0: "batch", 1: "sequence"}), + ("pixel_values", {0: "batch", 1: "num_channels"}), ] ) diff --git a/src/transformers/models/convnext/configuration_convnext.py b/src/transformers/models/convnext/configuration_convnext.py index 9f77c0099299ca..0b31da4370bfc5 100644 --- a/src/transformers/models/convnext/configuration_convnext.py +++ b/src/transformers/models/convnext/configuration_convnext.py @@ -117,7 +117,7 @@ class ConvNextOnnxConfig(OnnxConfig): def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ - ("pixel_values", {0: "batch", 1: "sequence"}), + ("pixel_values", {0: "batch", 1: "num_channels"}), ] ) diff --git a/src/transformers/models/data2vec/configuration_data2vec_vision.py b/src/transformers/models/data2vec/configuration_data2vec_vision.py index a7dd85b817348a..d6fc7871766faf 100644 --- a/src/transformers/models/data2vec/configuration_data2vec_vision.py +++ b/src/transformers/models/data2vec/configuration_data2vec_vision.py @@ -193,7 +193,7 @@ class Data2VecVisionOnnxConfig(OnnxConfig): def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ - ("pixel_values", {0: "batch", 1: "sequence"}), + ("pixel_values", {0: "batch", 1: "num_channels"}), ] ) diff --git a/src/transformers/models/deit/configuration_deit.py b/src/transformers/models/deit/configuration_deit.py index df74664ace6133..1e9154eeca4af2 100644 --- a/src/transformers/models/deit/configuration_deit.py +++ b/src/transformers/models/deit/configuration_deit.py @@ -137,7 +137,7 @@ class DeiTOnnxConfig(OnnxConfig): def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ - ("pixel_values", {0: "batch", 1: "sequence"}), + ("pixel_values", {0: "batch", 1: "num_channels"}), ] ) diff --git a/src/transformers/models/detr/configuration_detr.py b/src/transformers/models/detr/configuration_detr.py index fa8086efc46478..e46a5d610e8a8e 100644 --- a/src/transformers/models/detr/configuration_detr.py +++ b/src/transformers/models/detr/configuration_detr.py @@ -220,8 +220,8 @@ class DetrOnnxConfig(OnnxConfig): def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ - ("pixel_values", {0: "batch", 1: "sequence"}), - ("pixel_mask", {0: "batch", 1: "sequence"}), + ("pixel_values", {0: "batch", 1: "num_channels"}), + ("pixel_mask", {0: "batch"}), ] ) diff --git a/src/transformers/models/layoutlmv3/configuration_layoutlmv3.py b/src/transformers/models/layoutlmv3/configuration_layoutlmv3.py index d9ddde6289c9ab..ddf86ceaa1a49b 100644 --- a/src/transformers/models/layoutlmv3/configuration_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/configuration_layoutlmv3.py @@ -212,7 +212,7 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("input_ids", {0: "batch", 1: "sequence"}), ("bbox", {0: "batch", 1: "sequence"}), ("attention_mask", {0: "batch", 1: "sequence"}), - ("pixel_values", {0: "batch", 1: "sequence"}), + ("pixel_values", {0: "batch", 1: "num_channels"}), ] ) diff --git a/src/transformers/models/mobilevit/configuration_mobilevit.py b/src/transformers/models/mobilevit/configuration_mobilevit.py index 87a8a009ddc30f..e2b2c568f62d6f 100644 --- a/src/transformers/models/mobilevit/configuration_mobilevit.py +++ b/src/transformers/models/mobilevit/configuration_mobilevit.py @@ -171,7 +171,7 @@ class MobileViTOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: - return OrderedDict([("pixel_values", {0: "batch"})]) + return OrderedDict([("pixel_values", {0: "batch", 1: "num_channels"})]) @property def outputs(self) -> Mapping[str, Mapping[int, str]]: diff --git a/src/transformers/models/resnet/configuration_resnet.py b/src/transformers/models/resnet/configuration_resnet.py index 9bfc694bb1442a..61a7fc86de3afa 100644 --- a/src/transformers/models/resnet/configuration_resnet.py +++ b/src/transformers/models/resnet/configuration_resnet.py @@ -105,7 +105,7 @@ class ResNetOnnxConfig(OnnxConfig): def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ - ("pixel_values", {0: "batch", 1: "sequence"}), + ("pixel_values", {0: "batch", 1: "num_channels"}), ] ) diff --git a/src/transformers/models/vit/configuration_vit.py b/src/transformers/models/vit/configuration_vit.py index e84fc6c25f4a4b..a65790f30100d6 100644 --- a/src/transformers/models/vit/configuration_vit.py +++ b/src/transformers/models/vit/configuration_vit.py @@ -135,7 +135,7 @@ class ViTOnnxConfig(OnnxConfig): def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ - ("pixel_values", {0: "batch", 1: "sequence"}), + ("pixel_values", {0: "batch", 1: "num_channels"}), ] ) diff --git a/tests/onnx/test_onnx_v2.py b/tests/onnx/test_onnx_v2.py index b3c0ffb1f371b9..829c7ec0a42ab0 100644 --- a/tests/onnx/test_onnx_v2.py +++ b/tests/onnx/test_onnx_v2.py @@ -199,6 +199,7 @@ def test_values_override(self): ("roformer", "junnyu/roformer_chinese_base"), ("squeezebert", "squeezebert/squeezebert-uncased"), ("mobilebert", "google/mobilebert-uncased"), + ("mobilevit", "apple/mobilevit-small"), ("xlm", "xlm-clm-ende-1024"), ("xlm-roberta", "xlm-roberta-base"), ("layoutlm", "microsoft/layoutlm-base-uncased"), From 5987c637ee68aacd78945697c636abcd204b1997 Mon Sep 17 00:00:00 2001 From: lewtun Date: Thu, 18 Aug 2022 14:47:50 +0100 Subject: [PATCH 117/539] Fix repo consistency (#18682) --- src/transformers/models/levit/configuration_levit.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/models/levit/configuration_levit.py b/src/transformers/models/levit/configuration_levit.py index a1113d7a7512ce..69032a1faae6ad 100644 --- a/src/transformers/models/levit/configuration_levit.py +++ b/src/transformers/models/levit/configuration_levit.py @@ -137,7 +137,7 @@ class LevitOnnxConfig(OnnxConfig): def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ - ("pixel_values", {0: "batch", 1: "sequence"}), + ("pixel_values", {0: "batch", 1: "num_channels"}), ] ) From d243112b651f64b87d5d2509ff75042794484c20 Mon Sep 17 00:00:00 2001 From: Severin Simmler Date: Thu, 18 Aug 2022 16:06:16 +0200 Subject: [PATCH 118/539] Fix breaking change in `onnxruntime` for ONNX quantization (#18336) * Fix quantization * Save model * Remove unused comments * Fix formatting --- src/transformers/convert_graph_to_onnx.py | 43 ++++++++++++++++------- 1 file changed, 31 insertions(+), 12 deletions(-) diff --git a/src/transformers/convert_graph_to_onnx.py b/src/transformers/convert_graph_to_onnx.py index 59fb8ed39b01c1..53a518fd57dd65 100644 --- a/src/transformers/convert_graph_to_onnx.py +++ b/src/transformers/convert_graph_to_onnx.py @@ -435,29 +435,48 @@ def quantize(onnx_model_path: Path) -> Path: Returns: The Path generated for the quantized """ import onnx - from onnxruntime.quantization import QuantizationMode, quantize + from onnx.onnx_pb import ModelProto + from onnxruntime.quantization import QuantizationMode + from onnxruntime.quantization.onnx_quantizer import ONNXQuantizer + from onnxruntime.quantization.registry import IntegerOpsRegistry + # Load the ONNX model onnx_model = onnx.load(onnx_model_path.as_posix()) - # Discussed with @yufenglee from ONNX runtime, this will be address in the next release of onnxruntime - print( - "As of onnxruntime 1.4.0, models larger than 2GB will fail to quantize due to protobuf constraint.\n" - "This limitation will be removed in the next release of onnxruntime." - ) + if parse(onnx.__version__) < parse("1.5.0"): + print( + "Models larger than 2GB will fail to quantize due to protobuf constraint.\n" + "Please upgrade to onnxruntime >= 1.5.0." + ) - quantized_model = quantize( - model=onnx_model, - quantization_mode=QuantizationMode.IntegerOps, - force_fusions=True, - symmetric_weight=True, + # Copy it + copy_model = ModelProto() + copy_model.CopyFrom(onnx_model) + + # Construct quantizer + quantizer = ONNXQuantizer( + model=copy_model, + per_channel=False, + reduce_range=False, + mode=QuantizationMode.IntegerOps, + static=False, + weight_qType=True, + input_qType=False, + tensors_range=None, + nodes_to_quantize=None, + nodes_to_exclude=None, + op_types_to_quantize=list(IntegerOpsRegistry), ) + # Quantize and export + quantizer.quantize_model() + # Append "-quantized" at the end of the model's name quantized_model_path = generate_identified_filename(onnx_model_path, "-quantized") # Save model print(f"Quantized model has been written at {quantized_model_path}: \N{heavy check mark}") - onnx.save_model(quantized_model, quantized_model_path.as_posix()) + onnx.save_model(quantizer.model.model, quantized_model_path.as_posix()) return quantized_model_path From 358fc18613a737f3fbcebc5b2abed43386ff9cbc Mon Sep 17 00:00:00 2001 From: Zachary Mueller Date: Thu, 18 Aug 2022 10:57:39 -0400 Subject: [PATCH 119/539] Add evaluate to examples requirements (#18666) --- examples/pytorch/audio-classification/requirements.txt | 1 + examples/pytorch/image-classification/requirements.txt | 1 + examples/pytorch/language-modeling/requirements.txt | 1 + examples/pytorch/multiple-choice/requirements.txt | 1 + examples/pytorch/question-answering/requirements.txt | 1 + examples/pytorch/semantic-segmentation/requirements.txt | 3 ++- examples/pytorch/speech-pretraining/requirements.txt | 2 +- examples/pytorch/speech-recognition/requirements.txt | 1 + examples/pytorch/summarization/requirements.txt | 1 + examples/pytorch/text-classification/requirements.txt | 1 + examples/pytorch/token-classification/requirements.txt | 1 + examples/pytorch/translation/requirements.txt | 1 + 12 files changed, 13 insertions(+), 2 deletions(-) diff --git a/examples/pytorch/audio-classification/requirements.txt b/examples/pytorch/audio-classification/requirements.txt index 6ae3f11c5c86d9..acf058d4cf46ea 100644 --- a/examples/pytorch/audio-classification/requirements.txt +++ b/examples/pytorch/audio-classification/requirements.txt @@ -1,4 +1,5 @@ datasets>=1.14.0 +evaluate librosa torchaudio torch>=1.6 \ No newline at end of file diff --git a/examples/pytorch/image-classification/requirements.txt b/examples/pytorch/image-classification/requirements.txt index aadc0e9088f868..9409ce890022df 100644 --- a/examples/pytorch/image-classification/requirements.txt +++ b/examples/pytorch/image-classification/requirements.txt @@ -1,3 +1,4 @@ torch>=1.5.0 torchvision>=0.6.0 datasets>=1.17.0 +evaluate diff --git a/examples/pytorch/language-modeling/requirements.txt b/examples/pytorch/language-modeling/requirements.txt index bec267b98a11d8..f43c7b7575beaa 100644 --- a/examples/pytorch/language-modeling/requirements.txt +++ b/examples/pytorch/language-modeling/requirements.txt @@ -3,3 +3,4 @@ torch >= 1.3 datasets >= 1.8.0 sentencepiece != 0.1.92 protobuf +evaluate diff --git a/examples/pytorch/multiple-choice/requirements.txt b/examples/pytorch/multiple-choice/requirements.txt index 119271b050d23b..cf8760d7602ae9 100644 --- a/examples/pytorch/multiple-choice/requirements.txt +++ b/examples/pytorch/multiple-choice/requirements.txt @@ -2,3 +2,4 @@ accelerate sentencepiece != 0.1.92 protobuf torch >= 1.3 +evaluate diff --git a/examples/pytorch/question-answering/requirements.txt b/examples/pytorch/question-answering/requirements.txt index 0d4fe3df5cc898..20d27236f3f25f 100644 --- a/examples/pytorch/question-answering/requirements.txt +++ b/examples/pytorch/question-answering/requirements.txt @@ -1,3 +1,4 @@ accelerate datasets >= 1.8.0 torch >= 1.3.0 +evaluate \ No newline at end of file diff --git a/examples/pytorch/semantic-segmentation/requirements.txt b/examples/pytorch/semantic-segmentation/requirements.txt index 410ca78682c1b3..b839361cf27745 100644 --- a/examples/pytorch/semantic-segmentation/requirements.txt +++ b/examples/pytorch/semantic-segmentation/requirements.txt @@ -1,3 +1,4 @@ git://github.com/huggingface/accelerate.git datasets >= 2.0.0 -torch >= 1.3 \ No newline at end of file +torch >= 1.3 +evaluate \ No newline at end of file diff --git a/examples/pytorch/speech-pretraining/requirements.txt b/examples/pytorch/speech-pretraining/requirements.txt index 64a48c39672085..55847dea3f4188 100644 --- a/examples/pytorch/speech-pretraining/requirements.txt +++ b/examples/pytorch/speech-pretraining/requirements.txt @@ -2,4 +2,4 @@ datasets >= 1.12.0 torch >= 1.5 torchaudio accelerate >= 0.5.0 -librosa +librosa \ No newline at end of file diff --git a/examples/pytorch/speech-recognition/requirements.txt b/examples/pytorch/speech-recognition/requirements.txt index 219959a4b26773..a16697b038c635 100644 --- a/examples/pytorch/speech-recognition/requirements.txt +++ b/examples/pytorch/speech-recognition/requirements.txt @@ -3,3 +3,4 @@ torch >= 1.5 torchaudio librosa jiwer +evaluate diff --git a/examples/pytorch/summarization/requirements.txt b/examples/pytorch/summarization/requirements.txt index 3c2faf75b85548..8386b3fc1b4c0f 100644 --- a/examples/pytorch/summarization/requirements.txt +++ b/examples/pytorch/summarization/requirements.txt @@ -6,3 +6,4 @@ rouge-score nltk py7zr torch >= 1.3 +evaluate diff --git a/examples/pytorch/text-classification/requirements.txt b/examples/pytorch/text-classification/requirements.txt index 2a0e0d7deb3328..32722ebcde53da 100644 --- a/examples/pytorch/text-classification/requirements.txt +++ b/examples/pytorch/text-classification/requirements.txt @@ -5,3 +5,4 @@ scipy scikit-learn protobuf torch >= 1.3 +evaluate \ No newline at end of file diff --git a/examples/pytorch/token-classification/requirements.txt b/examples/pytorch/token-classification/requirements.txt index 8e03da16af6e89..9daf1e620243c1 100644 --- a/examples/pytorch/token-classification/requirements.txt +++ b/examples/pytorch/token-classification/requirements.txt @@ -2,3 +2,4 @@ accelerate seqeval datasets >= 1.8.0 torch >= 1.3 +evaluate \ No newline at end of file diff --git a/examples/pytorch/translation/requirements.txt b/examples/pytorch/translation/requirements.txt index c34795fffaa42e..cd9068b86527ac 100644 --- a/examples/pytorch/translation/requirements.txt +++ b/examples/pytorch/translation/requirements.txt @@ -5,3 +5,4 @@ protobuf sacrebleu >= 1.4.12 py7zr torch >= 1.3 +evaluate \ No newline at end of file From a123eee9df11b5a116452c3f539f2ff0427324d2 Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Thu, 18 Aug 2022 17:34:48 +0200 Subject: [PATCH 120/539] [bnb] Move documentation (#18671) * fix bnb documentation - move bnb documentation to `infer_gpu_many` * small refactoring - added text on infer_gpu_one - added a small note on infer_gpu_many - added customized multi gpu example on infer_gpu_many * Update docs/source/en/perf_infer_gpu_many.mdx Co-authored-by: Stas Bekman * apply suggestions Co-authored-by: Stas Bekman * Apply suggestions from code review Co-authored-by: Stas Bekman Co-authored-by: Stas Bekman --- docs/source/en/perf_infer_gpu_many.mdx | 7 ++- docs/source/en/perf_infer_gpu_one.mdx | 64 +++++++++++++++++++++++++- docs/source/en/perf_train_gpu_one.mdx | 55 +--------------------- 3 files changed, 70 insertions(+), 56 deletions(-) diff --git a/docs/source/en/perf_infer_gpu_many.mdx b/docs/source/en/perf_infer_gpu_many.mdx index 26f1f1be38d916..2f29c3c77bc343 100644 --- a/docs/source/en/perf_infer_gpu_many.mdx +++ b/docs/source/en/perf_infer_gpu_many.mdx @@ -11,4 +11,9 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o # Efficient Inference on a Multiple GPUs -This document will be completed soon with information on how to infer on a multiple GPUs. In the meantime you can check out [the guide for training on a single GPU](perf_train_gpu_one) and [the guide for inference on CPUs](perf_infer_cpu). \ No newline at end of file +This document contains information on how to efficiently infer on a multiple GPUs. + + +Note: A multi GPU setup can use the majority of the strategies described in the [single GPU section](perf infer gpu one). You must be aware of simple techniques, though, that can be used for a better usage. + + diff --git a/docs/source/en/perf_infer_gpu_one.mdx b/docs/source/en/perf_infer_gpu_one.mdx index 044ff67048627a..d794e6c8ec537b 100644 --- a/docs/source/en/perf_infer_gpu_one.mdx +++ b/docs/source/en/perf_infer_gpu_one.mdx @@ -11,4 +11,66 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o # Efficient Inference on a Single GPU -This document will be completed soon with information on how to infer on a single GPU. In the meantime you can check out [the guide for training on a single GPU](perf_train_gpu_one) and [the guide for inference on CPUs](perf_infer_cpu). \ No newline at end of file +This document will be completed soon with information on how to infer on a single GPU. In the meantime you can check out [the guide for training on a single GPU](perf_train_gpu_one) and [the guide for inference on CPUs](perf_infer_cpu). + +## `bitsandbytes` integration for Int8 mixed-precision matrix decomposition + +Note that this feature is also totally applicable in a multi GPU setup as well. + +From the paper [`LLM.int8() : 8-bit Matrix Multiplication for Transformers at Scale`](https://arxiv.org/abs/2208.07339), we support HuggingFace integration for all models in the Hub with a few lines of code. +The method reduce `nn.Linear` size by 2 for `float16` and `bfloat16` weights and by 4 for `float32` weights, with close to no impact to the quality by operating on the outliers in half-precision. + +![HFxbitsandbytes.png](https://s3.amazonaws.com/moonup/production/uploads/1659861207959-62441d1d9fdefb55a0b7d12c.png) + +Int8 mixed-precision matrix decomposition works by separating a matrix multiplication into two streams: (1) a systematic feature outlier stream matrix multiplied in fp16 (0.01%), (2) a regular stream of int8 matrix multiplication (99.9%). With this method, int8 inference with no predictive degradation is possible for very large models. +For more details regarding the method, check out the [paper](https://arxiv.org/abs/2208.07339) or our [blogpost about the integration](https://huggingface.co/blog/hf-bitsandbytes-integration). + +![MixedInt8.gif](https://s3.amazonaws.com/moonup/production/uploads/1660567469965-62441d1d9fdefb55a0b7d12c.gif) + +Note, that you would require a GPU to run mixed-8bit models as the kernels have been compiled for GPUs only. Make sure that you have enough GPU memory to store the quarter (or half if your model weights are in half precision) of the model before using this feature. +Below are some notes to help you use this module, or follow the demos on [Google colab](#colab-demos). + +### Requirements + +- Make sure you run that on NVIDIA GPUs that support 8-bit tensor cores (Turing, Ampere or newer architectures - e.g. T4, RTX20s RTX30s, A40-A100). +- Install the correct version of `bitsandbytes` by running: +`pip install bitsandbytes>=0.31.5` +- Install `accelerate` +`pip install accelerate>=0.12.0` + +### Running mixed-int8 models - single GPU setup + +After installing the required libraries, the way to load your mixed 8-bit model is as follows: +```py +model_name = "bigscience/bloom-2b5" +model_8bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_8bit=True) +``` + +### Running mixed-int8 models - multi GPU setup + +The way to load your mixed 8-bit model in multiple GPUs is as follows (same command as single GPU setup): +```py +model_name = "bigscience/bloom-2b5" +model_8bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_8bit=True) +``` +But you can control the GPU RAM you want to allocate on each GPU using `accelerate`. Use the `max_memory` argument as follows: + +```py +max_memory_mapping = {0: "1GB", 1: "2GB"} +model_name = "bigscience/bloom-3b" +model_8bit = AutoModelForCausalLM.from_pretrained( + model_name, device_map="auto", load_in_8bit=True, max_memory=max_memory_mapping +) +``` +In this example, the first GPU will use 1GB of memory and the second 2GB. + +### Colab demos + +With this method you can infer on models that were not possible to infer on a Google Colab before. +Check out the demo for running T5-11b (42GB in fp32)! Using 8-bit quantization on Google Colab: + +[![Open In Colab: T5-11b demo](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1YORPWx4okIHXnjW7MSAidXN29mPVNT7F?usp=sharing) + +Or this demo for BLOOM-3B: + +[![Open In Colab: BLOOM-3b demo](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1qOjXfQIAULfKvZqwCen8-MoWKGdSatZ4?usp=sharing) \ No newline at end of file diff --git a/docs/source/en/perf_train_gpu_one.mdx b/docs/source/en/perf_train_gpu_one.mdx index 32748186a42fdf..38d8534d85c840 100644 --- a/docs/source/en/perf_train_gpu_one.mdx +++ b/docs/source/en/perf_train_gpu_one.mdx @@ -732,57 +732,4 @@ TrainingArguments(torchdynamo="fx2trt-f16") #enable tensorRT fp16 This feature involves 3 different libraries. To install them, please follow the instructions below: - [Torchdynamo installation](https://github.com/pytorch/torchdynamo#requirements-and-setup) - [Functorch installation](https://github.com/pytorch/functorch#install) -- [Torch-TensorRT(FX) installation](https://github.com/pytorch/TensorRT/blob/master/docsrc/tutorials/getting_started_with_fx_path.rst#installation) - -## `bitsandbytes` integration for Int8 mixed-precision matrix decomposition - -From the paper [`LLM.int8() : 8-bit Matrix Multiplication for Transformers at Scale`](https://arxiv.org/abs/2208.07339), we support HuggingFace integration for all models in the Hub with a few lines of code. -The method reduce `nn.Linear` size by 2 for `float16` and `bfloat16` weights and by 4 for `float32` weights, with close to no impact to the quality by operating on the outliers in half-precision. - -![HFxbitsandbytes.png](https://s3.amazonaws.com/moonup/production/uploads/1659861207959-62441d1d9fdefb55a0b7d12c.png) - -Int8 mixed-precision matrix decomposition works by separating a matrix multiplication into two streams: (1) a systematic feature outlier stream matrix multiplied in fp16 (0.01%), (2) a regular stream of int8 matrix multiplication (99.9%). With this method, int8 inference with no predictive degradation is possible for very large models. -For more details regarding the method, check out the [paper](https://arxiv.org/abs/2208.07339) or our [blogpost about the integration](https://huggingface.co/blog/hf-bitsandbytes-integration). - -![MixedInt8.gif](https://s3.amazonaws.com/moonup/production/uploads/1660567469965-62441d1d9fdefb55a0b7d12c.gif) - -Note, that you would require a GPU to run mixed-8bit models as the kernels have been compiled for GPUs only. Make sure that you have enough GPU memory to store the quarter (or half if your model weights are in half precision) of the model before using this feature. -Below are some notes to help you use this module, or follow the demos on [Google colab](#colab-demos). - -### Requirements - -- Make sure you run that on NVIDIA GPUs that support 8-bit tensor cores (Turing, Ampere or newer architectures - e.g. T4, RTX20s RTX30s, A40-A100). -- Install the correct version of `bitsandbytes` by running: -`pip install bitsandbytes>=0.31.5` -- Install `accelerate` -`pip install accelerate>=0.12.0` - -### Running mixed-int8 models - -After installing the required libraries, the way to load your mixed 8-bit model is as follows: -```py -model_name = "bigscience/bloom-2b5" -model_8bit = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", load_in_8bit=True) -``` -The current implementation supports a multi-GPU setup when using `accelerate`. If you want to control the GPU memory you want to allocate for each GPU use the `max_memory` argument as follows: - -```py -max_memory_mapping = {0: "1GB", 1: "2GB"} -model_name = "bigscience/bloom-3b" -model_8bit = AutoModelForCausalLM.from_pretrained( - model_name, device_map="auto", load_in_8bit=True, max_memory=max_memory_mapping -) -``` - -In this example, the first GPU will use 1GB of memory and the second 2GB. - -### Colab demos - -With this method you can infer on models that were not possible to infer on a Google Colab before. -Check out the demo for running T5-11b (42GB in fp32)! Using 8-bit quantization on Google Colab: - -[![Open In Colab: T5-11b demo](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1YORPWx4okIHXnjW7MSAidXN29mPVNT7F?usp=sharing) - -Or this demo for BLOOM-3B: - -[![Open In Colab: BLOOM-3b demo](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1qOjXfQIAULfKvZqwCen8-MoWKGdSatZ4?usp=sharing) \ No newline at end of file +- [Torch-TensorRT(FX) installation](https://github.com/pytorch/TensorRT/blob/master/docsrc/tutorials/getting_started_with_fx_path.rst#installation) \ No newline at end of file From bbbb453e5869a5fec4925b02f1265c9e6bfc3ebb Mon Sep 17 00:00:00 2001 From: Loubna Ben Allal <44069155+loubnabnl@users.noreply.github.com> Date: Thu, 18 Aug 2022 18:24:24 +0200 Subject: [PATCH 121/539] Add an examples folder for code downstream tasks (#18679) * add examples subfolder * mention examples in codeparrot readme * use Trainer optimizer and scheduler type and add output_dir as argument * add example of text-to-python and python-to-text models * mention the downstream examples in the readme * fix typo --- .../research_projects/codeparrot/README.md | 6 +- .../codeparrot/examples/README.md | 58 ++++++++ .../codeparrot/examples/requirements.txt | 5 + .../examples/train_complexity_predictor.py | 132 ++++++++++++++++++ .../scripts/minhash_deduplication.py | 1 - 5 files changed, 200 insertions(+), 2 deletions(-) create mode 100644 examples/research_projects/codeparrot/examples/README.md create mode 100644 examples/research_projects/codeparrot/examples/requirements.txt create mode 100644 examples/research_projects/codeparrot/examples/train_complexity_predictor.py diff --git a/examples/research_projects/codeparrot/README.md b/examples/research_projects/codeparrot/README.md index ef92606c545a78..6c57c4350fbc02 100644 --- a/examples/research_projects/codeparrot/README.md +++ b/examples/research_projects/codeparrot/README.md @@ -12,7 +12,11 @@ This is an open-source effort to train and evaluate code generation models. Code - continuously push checkpoints to the hub with `huggingface_hub` - stream the dataset with `datasets` during training to avoid disk bottlenecks - apply the `code_eval` metric in `datasets` to evaluate on [OpenAI's _HumanEval_ benchmark](https://huggingface.co/datasets/openai_humaneval) - +- showcase examples for downstream tasks with code models in [examples](https://github.com/huggingface/transformers/tree/main/examples/research_projects/codeparrot/examples) folder: + - Algorithmic complexity prediction + - Code generation from english text + - Code explanation + ## Installation To install the dependencies simply run the following command: ```bash diff --git a/examples/research_projects/codeparrot/examples/README.md b/examples/research_projects/codeparrot/examples/README.md new file mode 100644 index 00000000000000..c1980262d8275b --- /dev/null +++ b/examples/research_projects/codeparrot/examples/README.md @@ -0,0 +1,58 @@ +# Examples +In this folder we showcase some examples to use code models for downstream tasks. + +## Complexity prediction +In this task we want to predict the complexity of Java programs in [CodeComplex](https://huggingface.co/datasets/codeparrot/codecomplex) dataset. Using Hugging Face `trainer`, we finetuned [multilingual CodeParrot](https://huggingface.co/codeparrot/codeparrot-small-multi) and [UniXcoder](https://huggingface.co/microsoft/unixcoder-base-nine) on it, and we used the latter to build this Java complexity prediction [space](https://huggingface.co/spaces/codeparrot/code-complexity-predictor) on Hugging Face hub. + +To fine-tune a model on this dataset you can use the following commands: + +```python +python train_complexity_predictor.py \ + --model_ckpt microsoft/unixcoder-base-nine \ + --num_epochs 60 \ + --num_warmup_steps 10 \ + --batch_size 8 \ + --learning_rate 5e-4 +``` + +## Code generation: text to python +In this task we want to train a model to generate code from english text. We finetuned Codeparrot-small on [github-jupyter-text-to-code](https://huggingface.co/datasets/codeparrot/github-jupyter-text-to-code), a dataset where the samples are a succession of docstrings and their Python code, originally extracted from Jupyter notebooks parsed in this [dataset](https://huggingface.co/datasets/codeparrot/github-jupyter-parsed). + +To fine-tune a model on this dataset we use the same [script](https://github.com/huggingface/transformers/blob/main/examples/research_projects/codeparrot/scripts/codeparrot_training.py) as the pretraining of codeparrot: + +```python +accelerate launch scripts/codeparrot_training.py \ + --model_ckpt codeparrot/codeparrot-small \ + --dataset_name_train codeparrot/github-jupyter-text-to-code \ + --dataset_name_valid codeparrot/github-jupyter-text-to-code \ + --train_batch_size 12 \ + --valid_batch_size 12 \ + --learning_rate 5e-4 \ + --num_warmup_steps 100 \ + --gradient_accumulation 1 \ + --gradient_checkpointing False \ + --max_train_steps 3000 \ + --save_checkpoint_steps 200 \ + --save_dir jupyter-text-to-python +``` + +## Code explanation: python to text +In this task we want to train a model to explain python code. We finetuned Codeparrot-small on [github-jupyter-code-to-text](https://huggingface.co/datasets/codeparrot/github-jupyter-code-to-text), a dataset where the samples are a succession of Python code and its explanation as a docstring, we just inverted the order of text and code pairs in github-jupyter-code-to-text dataset and added the delimiters "Explanation:" and "End of explanation" inside the doctrings. + +To fine-tune a model on this dataset we use the same [script](https://github.com/huggingface/transformers/blob/main/examples/research_projects/codeparrot/scripts/codeparrot_training.py) as the pretraining of codeparrot: + +```python +accelerate launch scripts/codeparrot_training.py \ + --model_ckpt codeparrot/codeparrot-small \ + --dataset_name_train codeparrot/github-jupyter-code-to-text \ + --dataset_name_valid codeparrot/github-jupyter-code-to-text \ + --train_batch_size 12 \ + --valid_batch_size 12 \ + --learning_rate 5e-4 \ + --num_warmup_steps 100 \ + --gradient_accumulation 1 \ + --gradient_checkpointing False \ + --max_train_steps 3000 \ + --save_checkpoint_steps 200 \ + --save_dir jupyter-python-to-text +``` \ No newline at end of file diff --git a/examples/research_projects/codeparrot/examples/requirements.txt b/examples/research_projects/codeparrot/examples/requirements.txt new file mode 100644 index 00000000000000..997334e27e18fc --- /dev/null +++ b/examples/research_projects/codeparrot/examples/requirements.txt @@ -0,0 +1,5 @@ +datasets==2.3.2 +transformers==4.21.1 +wandb==0.13.1 +evaluate==0.2.2 +scikit-learn==1.1.2 \ No newline at end of file diff --git a/examples/research_projects/codeparrot/examples/train_complexity_predictor.py b/examples/research_projects/codeparrot/examples/train_complexity_predictor.py new file mode 100644 index 00000000000000..8fc30b912468ba --- /dev/null +++ b/examples/research_projects/codeparrot/examples/train_complexity_predictor.py @@ -0,0 +1,132 @@ +import argparse +from copy import deepcopy + +import numpy as np +from datasets import ClassLabel, DatasetDict, load_dataset + +from evaluate import load +from transformers import ( + AutoModelForSequenceClassification, + AutoTokenizer, + DataCollatorWithPadding, + Trainer, + TrainerCallback, + TrainingArguments, + set_seed, +) + + +def get_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine") + parser.add_argument("--num_epochs", type=int, default=5) + parser.add_argument("--batch_size", type=int, default=6) + parser.add_argument("--gradient_accumulation_steps", type=int, default=1) + parser.add_argument("--freeze", type=bool, default=True) + parser.add_argument("--learning_rate", type=float, default=5e-4) + parser.add_argument("--seed", type=int, default=0) + parser.add_argument("--lr_scheduler_type", type=str, default="cosine") + parser.add_argument("--num_warmup_steps", type=int, default=10) + parser.add_argument("--weight_decay", type=float, default=0.01) + parser.add_argument("--output_dir", type=str, default="./results") + return parser.parse_args() + + +metric = load("accuracy") + + +def compute_metrics(eval_pred): + predictions, labels = eval_pred + predictions = np.argmax(predictions, axis=1) + return metric.compute(predictions=predictions, references=labels) + + +class CustomCallback(TrainerCallback): + def __init__(self, trainer) -> None: + super().__init__() + self._trainer = trainer + + def on_epoch_end(self, args, state, control, **kwargs): + if control.should_evaluate: + control_copy = deepcopy(control) + self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train") + return control_copy + + +def main(): + args = get_args() + set_seed(args.seed) + + dataset = load_dataset("codeparrot/codecomplex", split="train") + train_test = dataset.train_test_split(test_size=0.2) + test_validation = train_test["test"].train_test_split(test_size=0.5) + train_test_validation = DatasetDict( + { + "train": train_test["train"], + "test": test_validation["train"], + "valid": test_validation["test"], + } + ) + + print("Loading tokenizer and model") + tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt) + tokenizer.pad_token = tokenizer.eos_token + model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7) + model.config.pad_token_id = model.config.eos_token_id + + if args.freeze: + for param in model.roberta.parameters(): + param.requires_grad = False + + labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"]))) + + def tokenize(example): + inputs = tokenizer(example["src"], truncation=True, max_length=1024) + label = labels.str2int(example["complexity"]) + return { + "input_ids": inputs["input_ids"], + "attention_mask": inputs["attention_mask"], + "label": label, + } + + tokenized_datasets = train_test_validation.map( + tokenize, + batched=True, + remove_columns=train_test_validation["train"].column_names, + ) + data_collator = DataCollatorWithPadding(tokenizer=tokenizer) + + training_args = TrainingArguments( + output_dir=args.output_dir, + learning_rate=args.learning_rate, + lr_scheduler_type=args.lr_scheduler_type, + evaluation_strategy="epoch", + save_strategy="epoch", + logging_strategy="epoch", + per_device_train_batch_size=args.batch_size, + per_device_eval_batch_size=args.batch_size, + num_train_epochs=args.num_epochs, + gradient_accumulation_steps=args.gradient_accumulation_steps, + weight_decay=0.01, + metric_for_best_model="accuracy", + run_name="complexity-java", + report_to="wandb", + ) + + trainer = Trainer( + model=model, + args=training_args, + train_dataset=tokenized_datasets["train"], + eval_dataset=tokenized_datasets["valid"], + tokenizer=tokenizer, + data_collator=data_collator, + compute_metrics=compute_metrics, + ) + + print("Training...") + trainer.add_callback(CustomCallback(trainer)) + trainer.train() + + +if __name__ == "__main__": + main() diff --git a/examples/research_projects/codeparrot/scripts/minhash_deduplication.py b/examples/research_projects/codeparrot/scripts/minhash_deduplication.py index cd72dcb70c9ed2..9e1ef11ff07d15 100644 --- a/examples/research_projects/codeparrot/scripts/minhash_deduplication.py +++ b/examples/research_projects/codeparrot/scripts/minhash_deduplication.py @@ -63,7 +63,6 @@ def add(self, code_key: Tuple, min_hash: MinHash) -> None: self._index.insert(code_key, min_hash) if len(close_duplicates) > 0: - for base_duplicate in close_duplicates: if base_duplicate in self._duplicate_clusters: self._duplicate_clusters[base_duplicate].add(code_key) From e54a1b49aa6268c484625c6374f952f318914743 Mon Sep 17 00:00:00 2001 From: Atharva Ingle Date: Thu, 18 Aug 2022 23:16:57 +0530 Subject: [PATCH 122/539] `model.tie_weights()` should be applied after `accelerator.prepare()` (#18676) * `model.tie_weights()` should be applied after `accelerator.prepare` Weight tying should be done after the model has been moved to XLA device as mentioned on PyTorch/XLA Troubleshooting guide [here](https://github.com/pytorch/xla/blob/master/TROUBLESHOOTING.md#xla-tensor-quirks) * format code --- examples/pytorch/language-modeling/run_mlm_no_trainer.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/pytorch/language-modeling/run_mlm_no_trainer.py b/examples/pytorch/language-modeling/run_mlm_no_trainer.py index c5f6aad4126f5a..c01f870cdd4d1c 100755 --- a/examples/pytorch/language-modeling/run_mlm_no_trainer.py +++ b/examples/pytorch/language-modeling/run_mlm_no_trainer.py @@ -518,10 +518,6 @@ def group_texts(examples): ] optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate) - # On TPU, the tie weights in our model have been disconnected, so we need to restore the ties. - if accelerator.distributed_type == DistributedType.TPU: - model.tie_weights() - # Note -> the training dataloader needs to be prepared before we grab his length below (cause its length will be # shorter in multiprocess) @@ -544,6 +540,10 @@ def group_texts(examples): model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) + # On TPU, the tie weights in our model have been disconnected, so we need to restore the ties. + if accelerator.distributed_type == DistributedType.TPU: + model.tie_weights() + # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: From e95d433d77727a9babadf008dd621a2326d37303 Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Fri, 19 Aug 2022 16:14:27 +0100 Subject: [PATCH 123/539] Generate: add missing `**model_kwargs` in sample tests (#18696) --- tests/generation/test_generation_utils.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/generation/test_generation_utils.py b/tests/generation/test_generation_utils.py index ba13669368d228..62a3f588cf471b 100644 --- a/tests/generation/test_generation_utils.py +++ b/tests/generation/test_generation_utils.py @@ -327,6 +327,7 @@ def _sample_generate( remove_invalid_values=True, **logits_warper_kwargs, **process_kwargs, + **model_kwargs, ) torch.manual_seed(0) @@ -361,6 +362,7 @@ def _sample_generate( **kwargs, **model_kwargs, ) + return output_sample, output_generate def _beam_search_generate( From 1f3c2282b5f806a6b80dfc916925d257d9bde5f3 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Fri, 19 Aug 2022 22:55:33 +0200 Subject: [PATCH 124/539] Temp fix for broken detectron2 import (#18699) * add first generation tutorial * [Circle CI] Temporary fix for broken detectron2 import * remove generation --- .../models/layoutlmv2/modeling_layoutlmv2.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py b/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py index be31af99d6dfd8..c4e6ba0ab5aea4 100755 --- a/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py +++ b/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py @@ -43,12 +43,26 @@ from .configuration_layoutlmv2 import LayoutLMv2Config +logger = logging.get_logger(__name__) + + # soft dependency if is_detectron2_available(): import detectron2 - from detectron2.modeling import META_ARCH_REGISTRY -logger = logging.get_logger(__name__) + try: + from detectron2.modeling import META_ARCH_REGISTRY + except ImportError: + # NOTE: This is a temporary fix because currently there are + # import problems when using detectron2 from master (see issues below) + # it's better to have a silent error here in case someone imports this file + # without using the model which without this hack would break. + logger.warning( + "The detectron2 import seems to be broken. See:" + "https://github.com/facebookresearch/detectron2/issues/4489 or" + "https://github.com/facebookresearch/detectron2/issues/4487" + ) + pass _CHECKPOINT_FOR_DOC = "microsoft/layoutlmv2-base-uncased" _CONFIG_FOR_DOC = "LayoutLMv2Config" From 30992ef0d911bdeca425969d210771118a5cd1ac Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Sat, 20 Aug 2022 00:37:38 +0200 Subject: [PATCH 125/539] [Hotfix] pin detectron2 5aeb252 to avoid test fix (#18701) Co-authored-by: ydshieh --- docker/transformers-all-latest-gpu/Dockerfile | 2 +- docker/transformers-doc-builder/Dockerfile | 2 +- docker/transformers-pytorch-gpu/Dockerfile | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/docker/transformers-all-latest-gpu/Dockerfile b/docker/transformers-all-latest-gpu/Dockerfile index 4db6f51826f02b..76c97fab2a6596 100644 --- a/docker/transformers-all-latest-gpu/Dockerfile +++ b/docker/transformers-all-latest-gpu/Dockerfile @@ -40,7 +40,7 @@ RUN python3 -m pip uninstall -y flax jax RUN python3 -m pip install --no-cache-dir torch-scatter -f https://data.pyg.org/whl/torch-$(python3 -c "from torch import version; print(version.__version__.split('+')[0])")+$CUDA.html RUN python3 -m pip install --no-cache-dir intel_extension_for_pytorch==$INTEL_TORCH_EXT+cpu -f https://software.intel.com/ipex-whl-stable -RUN python3 -m pip install --no-cache-dir git+https://github.com/facebookresearch/detectron2.git pytesseract https://github.com/kpu/kenlm/archive/master.zip +RUN python3 -m pip install --no-cache-dir git+https://github.com/facebookresearch/detectron2.git@5aeb252b194b93dc2879b4ac34bc51a31b5aee13 pytesseract https://github.com/kpu/kenlm/archive/master.zip RUN python3 -m pip install -U "itsdangerous<2.1.0" RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate diff --git a/docker/transformers-doc-builder/Dockerfile b/docker/transformers-doc-builder/Dockerfile index de0eb1713727d9..15b643996558c0 100644 --- a/docker/transformers-doc-builder/Dockerfile +++ b/docker/transformers-doc-builder/Dockerfile @@ -11,7 +11,7 @@ RUN apt-get -y update && apt-get install -y libsndfile1-dev && apt install -y te RUN python3 -m pip install --no-cache-dir ./transformers[deepspeed] RUN python3 -m pip install --no-cache-dir torch-scatter -f https://data.pyg.org/whl/torch-$(python -c "from torch import version; print(version.__version__.split('+')[0])")+cpu.html -RUN python3 -m pip install --no-cache-dir torchvision git+https://github.com/facebookresearch/detectron2.git pytesseract https://github.com/kpu/kenlm/archive/master.zip +RUN python3 -m pip install --no-cache-dir torchvision git+https://github.com/facebookresearch/detectron2.git@5aeb252b194b93dc2879b4ac34bc51a31b5aee13 pytesseract https://github.com/kpu/kenlm/archive/master.zip RUN python3 -m pip install --no-cache-dir pytorch-quantization --extra-index-url https://pypi.ngc.nvidia.com RUN python3 -m pip install -U "itsdangerous<2.1.0" diff --git a/docker/transformers-pytorch-gpu/Dockerfile b/docker/transformers-pytorch-gpu/Dockerfile index 668bec3e715d86..c4a93b2e2c702c 100644 --- a/docker/transformers-pytorch-gpu/Dockerfile +++ b/docker/transformers-pytorch-gpu/Dockerfile @@ -23,7 +23,7 @@ RUN [ ${#TORCH_AUDIO} -gt 0 ] && VERSION='torchaudio=='TORCH_AUDIO'.*' || VERSI RUN python3 -m pip uninstall -y tensorflow flax RUN python3 -m pip install --no-cache-dir torch-scatter -f https://data.pyg.org/whl/torch-$(python3 -c "from torch import version; print(version.__version__.split('+')[0])")+cu113.html -RUN python3 -m pip install --no-cache-dir git+https://github.com/facebookresearch/detectron2.git pytesseract https://github.com/kpu/kenlm/archive/master.zip +RUN python3 -m pip install --no-cache-dir git+https://github.com/facebookresearch/detectron2.git@5aeb252b194b93dc2879b4ac34bc51a31b5aee13 pytesseract https://github.com/kpu/kenlm/archive/master.zip RUN python3 -m pip install -U "itsdangerous<2.1.0" # When installing in editable mode, `transformers` is not recognized as a package. From 3fa45dbd91dcaa8cd8e4278da9ca3b4fced677a4 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 22 Aug 2022 11:28:23 +0200 Subject: [PATCH 126/539] Fix Data2VecVision ONNX test (#18587) Co-authored-by: lewtun Co-authored-by: ydshieh --- src/transformers/onnx/config.py | 1 + src/transformers/onnx/features.py | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/transformers/onnx/config.py b/src/transformers/onnx/config.py index fdcc12bdcd1f0e..3b789051a2203f 100644 --- a/src/transformers/onnx/config.py +++ b/src/transformers/onnx/config.py @@ -99,6 +99,7 @@ class OnnxConfig(ABC): "end_logits": {0: "batch", 1: "sequence"}, } ), + "semantic-segmentation": OrderedDict({"logits": {0: "batch", 1: "num_labels", 2: "height", 3: "width"}}), "seq2seq-lm": OrderedDict({"logits": {0: "batch", 1: "decoder_sequence"}}), "sequence-classification": OrderedDict({"logits": {0: "batch"}}), "token-classification": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), diff --git a/src/transformers/onnx/features.py b/src/transformers/onnx/features.py index fbfeb47250e73f..3596fe1840094f 100644 --- a/src/transformers/onnx/features.py +++ b/src/transformers/onnx/features.py @@ -25,6 +25,7 @@ AutoModelForMultipleChoice, AutoModelForObjectDetection, AutoModelForQuestionAnswering, + AutoModelForSemanticSegmentation, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, @@ -36,6 +37,7 @@ TFAutoModelForMaskedLM, TFAutoModelForMultipleChoice, TFAutoModelForQuestionAnswering, + TFAutoModelForSemanticSegmentation, TFAutoModelForSeq2SeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, @@ -94,6 +96,7 @@ class FeaturesManager: "image-classification": AutoModelForImageClassification, "image-segmentation": AutoModelForImageSegmentation, "masked-im": AutoModelForMaskedImageModeling, + "semantic-segmentation": AutoModelForSemanticSegmentation, } if is_tf_available(): _TASKS_TO_TF_AUTOMODELS = { @@ -105,6 +108,7 @@ class FeaturesManager: "token-classification": TFAutoModelForTokenClassification, "multiple-choice": TFAutoModelForMultipleChoice, "question-answering": TFAutoModelForQuestionAnswering, + "semantic-segmentation": TFAutoModelForSemanticSegmentation, } # Set of model topologies we support associated to the features supported by each topology and the factory @@ -236,7 +240,8 @@ class FeaturesManager: "data2vec-vision": supported_features_mapping( "default", "image-classification", - "image-segmentation", + # ONNX doesn't support `adaptive_avg_pool2d` yet + # "semantic-segmentation", onnx_config_cls="models.data2vec.Data2VecVisionOnnxConfig", ), "deberta": supported_features_mapping( From 0f257a87749e0a72bda260c6f319a45dae1e7c4d Mon Sep 17 00:00:00 2001 From: tgadeliya <32731151+tgadeliya@users.noreply.github.com> Date: Mon, 22 Aug 2022 12:13:20 +0200 Subject: [PATCH 127/539] Add missing tokenizer tests - Longformer (#17677) --- .../test_tokenization_longformer.py | 305 ++++++++++++++++++ 1 file changed, 305 insertions(+) create mode 100644 tests/models/longformer/test_tokenization_longformer.py diff --git a/tests/models/longformer/test_tokenization_longformer.py b/tests/models/longformer/test_tokenization_longformer.py new file mode 100644 index 00000000000000..2397a40bafa6b1 --- /dev/null +++ b/tests/models/longformer/test_tokenization_longformer.py @@ -0,0 +1,305 @@ +# coding=utf-8 +# Copyright 2022 Tsimur Hadeliya. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Testing suite for the Longformer tokenizer. """ + + +import itertools +import json +import os +import unittest + +from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast +from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES +from transformers.testing_utils import require_tokenizers, slow + +from ...test_tokenization_common import TokenizerTesterMixin + + +# Copied from transformers.tests.roberta.test_modeling_roberta.py with Roberta->Longformer +@require_tokenizers +class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase): + tokenizer_class = LongformerTokenizer + test_slow_tokenizer = True + rust_tokenizer_class = LongformerTokenizerFast + test_rust_tokenizer = True + + def setUp(self): + super().setUp() + + # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt + vocab = [ + "l", + "o", + "w", + "e", + "r", + "s", + "t", + "i", + "d", + "n", + "\u0120", + "\u0120l", + "\u0120n", + "\u0120lo", + "\u0120low", + "er", + "\u0120lowest", + "\u0120newer", + "\u0120wider", + "", + ] + vocab_tokens = dict(zip(vocab, range(len(vocab)))) + merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] + self.special_tokens_map = {"unk_token": ""} + + self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) + self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) + with open(self.vocab_file, "w", encoding="utf-8") as fp: + fp.write(json.dumps(vocab_tokens) + "\n") + with open(self.merges_file, "w", encoding="utf-8") as fp: + fp.write("\n".join(merges)) + + def get_tokenizer(self, **kwargs): + kwargs.update(self.special_tokens_map) + return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) + + def get_rust_tokenizer(self, **kwargs): + kwargs.update(self.special_tokens_map) + return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) + + def get_input_output_texts(self, tokenizer): + input_text = "lower newer" + output_text = "lower newer" + return input_text, output_text + + def test_full_tokenizer(self): + tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map) + text = "lower newer" + bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] + tokens = tokenizer.tokenize(text) # , add_prefix_space=True) + self.assertListEqual(tokens, bpe_tokens) + + input_tokens = tokens + [tokenizer.unk_token] + input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] + self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) + + def longformer_dict_integration_testing(self): + tokenizer = self.get_tokenizer() + + self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2]) + self.assertListEqual( + tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False), + [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2], + ) + + @slow + def test_sequence_builders(self): + tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096") + + text = tokenizer.encode("sequence builders", add_special_tokens=False) + text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) + + encoded_text_from_decode = tokenizer.encode( + "sequence builders", add_special_tokens=True, add_prefix_space=False + ) + encoded_pair_from_decode = tokenizer.encode( + "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False + ) + + encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) + encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) + + assert encoded_sentence == encoded_text_from_decode + assert encoded_pair == encoded_pair_from_decode + + def test_space_encoding(self): + tokenizer = self.get_tokenizer() + + sequence = "Encode this sequence." + space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]] + + # Testing encoder arguments + encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False) + first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0] + self.assertNotEqual(first_char, space_encoding) + + encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True) + first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0] + self.assertEqual(first_char, space_encoding) + + tokenizer.add_special_tokens({"bos_token": ""}) + encoded = tokenizer.encode(sequence, add_special_tokens=True) + first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0] + self.assertNotEqual(first_char, space_encoding) + + # Testing spaces after special tokens + mask = "" + tokenizer.add_special_tokens( + {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)} + ) # mask token has a left space + mask_ind = tokenizer.convert_tokens_to_ids(mask) + + sequence = "Encode sequence" + sequence_nospace = "Encode sequence" + + encoded = tokenizer.encode(sequence) + mask_loc = encoded.index(mask_ind) + first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0] + self.assertEqual(first_char, space_encoding) + + encoded = tokenizer.encode(sequence_nospace) + mask_loc = encoded.index(mask_ind) + first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0] + self.assertNotEqual(first_char, space_encoding) + + def test_pretokenized_inputs(self): + pass + + def test_embeded_special_tokens(self): + for tokenizer, pretrained_name, kwargs in self.tokenizers_list: + with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): + tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) + tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) + sentence = "A, AllenNLP sentence." + tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True) + tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True) + + # token_type_ids should put 0 everywhere + self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"])) + + # attention_mask should put 1 everywhere, so sum over length should be 1 + self.assertEqual( + sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]), + sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]), + ) + + tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"]) + tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"]) + + # Rust correctly handles the space before the mask while python doesnt + self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) + self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) + + self.assertSequenceEqual( + tokens_p_str, ["", "A", ",", "", "ĠAllen", "N", "LP", "Ġsentence", ".", ""] + ) + self.assertSequenceEqual( + tokens_r_str, ["", "A", ",", "", "ĠAllen", "N", "LP", "Ġsentence", ".", ""] + ) + + def test_change_add_prefix_space_and_trim_offsets_args(self): + for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2): + tokenizer_r = self.rust_tokenizer_class.from_pretrained( + self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets + ) + + pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__()) + post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__()) + + self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space) + + self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space) + self.assertEqual(post_processor_state["trim_offsets"], trim_offsets) + + def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self): + # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and + # `trim_offsets` + for tokenizer, pretrained_name, kwargs in self.tokenizers_list: + with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): + text_of_1_token = "hello" # `hello` is a token in the vocabulary of `pretrained_name` + text = f"{text_of_1_token} {text_of_1_token}" + + tokenizer_r = self.rust_tokenizer_class.from_pretrained( + pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True + ) + encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) + self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token))) + self.assertEqual( + encoding.offset_mapping[1], + (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), + ) + + tokenizer_r = self.rust_tokenizer_class.from_pretrained( + pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True + ) + encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) + self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token))) + self.assertEqual( + encoding.offset_mapping[1], + (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), + ) + + tokenizer_r = self.rust_tokenizer_class.from_pretrained( + pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False + ) + encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) + self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token))) + self.assertEqual( + encoding.offset_mapping[1], + (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), + ) + + tokenizer_r = self.rust_tokenizer_class.from_pretrained( + pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False + ) + encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) + self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token))) + self.assertEqual( + encoding.offset_mapping[1], + (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), + ) + + text = f" {text}" + + # tokenizer_r = self.rust_tokenizer_class.from_pretrained( + # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True + # ) + # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) + # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) + # self.assertEqual( + # encoding.offset_mapping[1], + # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), + # ) + + tokenizer_r = self.rust_tokenizer_class.from_pretrained( + pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True + ) + encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) + self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) + self.assertEqual( + encoding.offset_mapping[1], + (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), + ) + + tokenizer_r = self.rust_tokenizer_class.from_pretrained( + pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False + ) + encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) + self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token))) + self.assertEqual( + encoding.offset_mapping[1], + (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), + ) + + tokenizer_r = self.rust_tokenizer_class.from_pretrained( + pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False + ) + encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) + self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token))) + self.assertEqual( + encoding.offset_mapping[1], + (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), + ) From d90a36d192e2981a41122c30a765c63158dd0557 Mon Sep 17 00:00:00 2001 From: Atharva Ingle Date: Mon, 22 Aug 2022 20:46:27 +0530 Subject: [PATCH 128/539] remove check for main process for trackers initialization (#18706) --- .../run_image_classification_no_trainer.py | 12 +++++------- .../pytorch/language-modeling/run_clm_no_trainer.py | 12 +++++------- .../pytorch/language-modeling/run_mlm_no_trainer.py | 12 +++++------- .../pytorch/multiple-choice/run_swag_no_trainer.py | 12 +++++------- .../pytorch/question-answering/run_qa_no_trainer.py | 12 +++++------- .../run_semantic_segmentation_no_trainer.py | 12 +++++------- .../summarization/run_summarization_no_trainer.py | 12 +++++------- .../text-classification/run_glue_no_trainer.py | 12 +++++------- .../token-classification/run_ner_no_trainer.py | 12 +++++------- 9 files changed, 45 insertions(+), 63 deletions(-) diff --git a/examples/pytorch/image-classification/run_image_classification_no_trainer.py b/examples/pytorch/image-classification/run_image_classification_no_trainer.py index 1bd190d1303e9a..69ee2875e61d1c 100644 --- a/examples/pytorch/image-classification/run_image_classification_no_trainer.py +++ b/examples/pytorch/image-classification/run_image_classification_no_trainer.py @@ -414,14 +414,12 @@ def collate_fn(examples): checkpointing_steps = None # We need to initialize the trackers we use, and also store our configuration. - # We initialize the trackers only on main process because `accelerator.log` - # only logs on main process and we don't want empty logs/runs on other processes. + # The trackers initializes automatically on the main process. if args.with_tracking: - if accelerator.is_main_process: - experiment_config = vars(args) - # TensorBoard cannot log Enums, need the raw value - experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value - accelerator.init_trackers("image_classification_no_trainer", experiment_config) + experiment_config = vars(args) + # TensorBoard cannot log Enums, need the raw value + experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value + accelerator.init_trackers("image_classification_no_trainer", experiment_config) # Get the metric function metric = evaluate.load("accuracy") diff --git a/examples/pytorch/language-modeling/run_clm_no_trainer.py b/examples/pytorch/language-modeling/run_clm_no_trainer.py index 225b88a49440cc..d00d0792ab90ee 100755 --- a/examples/pytorch/language-modeling/run_clm_no_trainer.py +++ b/examples/pytorch/language-modeling/run_clm_no_trainer.py @@ -516,14 +516,12 @@ def group_texts(examples): checkpointing_steps = None # We need to initialize the trackers we use, and also store our configuration. - # We initialize the trackers only on main process because `accelerator.log` - # only logs on main process and we don't want empty logs/runs on other processes. + # The trackers initializes automatically on the main process. if args.with_tracking: - if accelerator.is_main_process: - experiment_config = vars(args) - # TensorBoard cannot log Enums, need the raw value - experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value - accelerator.init_trackers("clm_no_trainer", experiment_config) + experiment_config = vars(args) + # TensorBoard cannot log Enums, need the raw value + experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value + accelerator.init_trackers("clm_no_trainer", experiment_config) # Train! total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps diff --git a/examples/pytorch/language-modeling/run_mlm_no_trainer.py b/examples/pytorch/language-modeling/run_mlm_no_trainer.py index c01f870cdd4d1c..7da09e16c5914e 100755 --- a/examples/pytorch/language-modeling/run_mlm_no_trainer.py +++ b/examples/pytorch/language-modeling/run_mlm_no_trainer.py @@ -560,14 +560,12 @@ def group_texts(examples): checkpointing_steps = None # We need to initialize the trackers we use, and also store our configuration. - # We initialize the trackers only on main process because `accelerator.log` - # only logs on main process and we don't want empty logs/runs on other processes. + # The trackers initializes automatically on the main process. if args.with_tracking: - if accelerator.is_main_process: - experiment_config = vars(args) - # TensorBoard cannot log Enums, need the raw value - experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value - accelerator.init_trackers("mlm_no_trainer", experiment_config) + experiment_config = vars(args) + # TensorBoard cannot log Enums, need the raw value + experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value + accelerator.init_trackers("mlm_no_trainer", experiment_config) # Train! total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps diff --git a/examples/pytorch/multiple-choice/run_swag_no_trainer.py b/examples/pytorch/multiple-choice/run_swag_no_trainer.py index eeb04e417fdfd6..4cf2c9cc440204 100755 --- a/examples/pytorch/multiple-choice/run_swag_no_trainer.py +++ b/examples/pytorch/multiple-choice/run_swag_no_trainer.py @@ -513,14 +513,12 @@ def preprocess_function(examples): checkpointing_steps = None # We need to initialize the trackers we use, and also store our configuration. - # We initialize the trackers only on main process because `accelerator.log` - # only logs on main process and we don't want empty logs/runs on other processes. + # The trackers initializes automatically on the main process. if args.with_tracking: - if accelerator.is_main_process: - experiment_config = vars(args) - # TensorBoard cannot log Enums, need the raw value - experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value - accelerator.init_trackers("swag_no_trainer", experiment_config) + experiment_config = vars(args) + # TensorBoard cannot log Enums, need the raw value + experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value + accelerator.init_trackers("swag_no_trainer", experiment_config) # Metrics metric = evaluate.load("accuracy") diff --git a/examples/pytorch/question-answering/run_qa_no_trainer.py b/examples/pytorch/question-answering/run_qa_no_trainer.py index 6bf4eb28e99418..fb6e1e96d52582 100755 --- a/examples/pytorch/question-answering/run_qa_no_trainer.py +++ b/examples/pytorch/question-answering/run_qa_no_trainer.py @@ -787,14 +787,12 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len): checkpointing_steps = None # We need to initialize the trackers we use, and also store our configuration. - # We initialize the trackers only on main process because `accelerator.log` - # only logs on main process and we don't want empty logs/runs on other processes. + # The trackers initializes automatically on the main process. if args.with_tracking: - if accelerator.is_main_process: - experiment_config = vars(args) - # TensorBoard cannot log Enums, need the raw value - experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value - accelerator.init_trackers("qa_no_trainer", experiment_config) + experiment_config = vars(args) + # TensorBoard cannot log Enums, need the raw value + experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value + accelerator.init_trackers("qa_no_trainer", experiment_config) # Train! total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps diff --git a/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py b/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py index 30cb7cc53ae318..a6550176aa37dc 100644 --- a/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py +++ b/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py @@ -512,14 +512,12 @@ def preprocess_val(example_batch): metric = evaluate.load("mean_iou") # We need to initialize the trackers we use, and also store our configuration. - # We initialize the trackers only on main process because `accelerator.log` - # only logs on main process and we don't want empty logs/runs on other processes. + # The trackers initializes automatically on the main process. if args.with_tracking: - if accelerator.is_main_process: - experiment_config = vars(args) - # TensorBoard cannot log Enums, need the raw value - experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value - accelerator.init_trackers("semantic_segmentation_no_trainer", experiment_config) + experiment_config = vars(args) + # TensorBoard cannot log Enums, need the raw value + experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value + accelerator.init_trackers("semantic_segmentation_no_trainer", experiment_config) # Train! total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps diff --git a/examples/pytorch/summarization/run_summarization_no_trainer.py b/examples/pytorch/summarization/run_summarization_no_trainer.py index 96781b6dcadbdd..b75b4bf7d4c13a 100644 --- a/examples/pytorch/summarization/run_summarization_no_trainer.py +++ b/examples/pytorch/summarization/run_summarization_no_trainer.py @@ -581,14 +581,12 @@ def postprocess_text(preds, labels): checkpointing_steps = None # We need to initialize the trackers we use, and also store our configuration. - # We initialize the trackers only on main process because `accelerator.log` - # only logs on main process and we don't want empty logs/runs on other processes. + # The trackers initializes automatically on the main process. if args.with_tracking: - if accelerator.is_main_process: - experiment_config = vars(args) - # TensorBoard cannot log Enums, need the raw value - experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value - accelerator.init_trackers("summarization_no_trainer", experiment_config) + experiment_config = vars(args) + # TensorBoard cannot log Enums, need the raw value + experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value + accelerator.init_trackers("summarization_no_trainer", experiment_config) # Metric metric = evaluate.load("rouge") diff --git a/examples/pytorch/text-classification/run_glue_no_trainer.py b/examples/pytorch/text-classification/run_glue_no_trainer.py index f74e5520699bb9..1213460cfb970e 100644 --- a/examples/pytorch/text-classification/run_glue_no_trainer.py +++ b/examples/pytorch/text-classification/run_glue_no_trainer.py @@ -459,14 +459,12 @@ def preprocess_function(examples): checkpointing_steps = None # We need to initialize the trackers we use, and also store our configuration. - # We initialize the trackers only on main process because `accelerator.log` - # only logs on main process and we don't want empty logs/runs on other processes. + # The trackers initializes automatically on the main process. if args.with_tracking: - if accelerator.is_main_process: - experiment_config = vars(args) - # TensorBoard cannot log Enums, need the raw value - experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value - accelerator.init_trackers("glue_no_trainer", experiment_config) + experiment_config = vars(args) + # TensorBoard cannot log Enums, need the raw value + experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value + accelerator.init_trackers("glue_no_trainer", experiment_config) # Get the metric function if args.task_name is not None: diff --git a/examples/pytorch/token-classification/run_ner_no_trainer.py b/examples/pytorch/token-classification/run_ner_no_trainer.py index 937abb718e72b1..1156e1f4f01bed 100755 --- a/examples/pytorch/token-classification/run_ner_no_trainer.py +++ b/examples/pytorch/token-classification/run_ner_no_trainer.py @@ -574,14 +574,12 @@ def tokenize_and_align_labels(examples): checkpointing_steps = None # We need to initialize the trackers we use, and also store our configuration. - # We initialize the trackers only on main process because `accelerator.log` - # only logs on main process and we don't want empty logs/runs on other processes. + # The trackers initializes automatically on the main process. if args.with_tracking: - if accelerator.is_main_process: - experiment_config = vars(args) - # TensorBoard cannot log Enums, need the raw value - experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value - accelerator.init_trackers("ner_no_trainer", experiment_config) + experiment_config = vars(args) + # TensorBoard cannot log Enums, need the raw value + experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value + accelerator.init_trackers("ner_no_trainer", experiment_config) # Metrics metric = evaluate.load("seqeval") From 84beb8a49bf137a88d1b29ab3a85ba0a3cd097d5 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Tue, 23 Aug 2022 11:10:07 +0200 Subject: [PATCH 129/539] Unpin detectron2 (#18727) Co-authored-by: ydshieh --- .circleci/config.yml | 2 +- docker/transformers-all-latest-gpu/Dockerfile | 2 +- docker/transformers-doc-builder/Dockerfile | 2 +- docker/transformers-pytorch-gpu/Dockerfile | 2 +- .../models/layoutlmv2/modeling_layoutlmv2.py | 18 ++---------------- 5 files changed, 6 insertions(+), 20 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 3e5c6aaa88581a..3b895d0dd17100 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1006,7 +1006,7 @@ jobs: # The commit `36a65a0907d90ed591479b2ebaa8b61cfa0b4ef0` in `detectron2` break things. # See https://github.com/facebookresearch/detectron2/commit/36a65a0907d90ed591479b2ebaa8b61cfa0b4ef0#comments. # TODO: Revert this change back once the above issue is fixed. - - run: python -m pip install 'git+https://github.com/facebookresearch/detectron2.git@5aeb252b194b93dc2879b4ac34bc51a31b5aee13' + - run: python -m pip install 'git+https://github.com/facebookresearch/detectron2.git' - run: sudo apt install tesseract-ocr - run: pip install pytesseract - save_cache: diff --git a/docker/transformers-all-latest-gpu/Dockerfile b/docker/transformers-all-latest-gpu/Dockerfile index 76c97fab2a6596..4db6f51826f02b 100644 --- a/docker/transformers-all-latest-gpu/Dockerfile +++ b/docker/transformers-all-latest-gpu/Dockerfile @@ -40,7 +40,7 @@ RUN python3 -m pip uninstall -y flax jax RUN python3 -m pip install --no-cache-dir torch-scatter -f https://data.pyg.org/whl/torch-$(python3 -c "from torch import version; print(version.__version__.split('+')[0])")+$CUDA.html RUN python3 -m pip install --no-cache-dir intel_extension_for_pytorch==$INTEL_TORCH_EXT+cpu -f https://software.intel.com/ipex-whl-stable -RUN python3 -m pip install --no-cache-dir git+https://github.com/facebookresearch/detectron2.git@5aeb252b194b93dc2879b4ac34bc51a31b5aee13 pytesseract https://github.com/kpu/kenlm/archive/master.zip +RUN python3 -m pip install --no-cache-dir git+https://github.com/facebookresearch/detectron2.git pytesseract https://github.com/kpu/kenlm/archive/master.zip RUN python3 -m pip install -U "itsdangerous<2.1.0" RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate diff --git a/docker/transformers-doc-builder/Dockerfile b/docker/transformers-doc-builder/Dockerfile index 15b643996558c0..de0eb1713727d9 100644 --- a/docker/transformers-doc-builder/Dockerfile +++ b/docker/transformers-doc-builder/Dockerfile @@ -11,7 +11,7 @@ RUN apt-get -y update && apt-get install -y libsndfile1-dev && apt install -y te RUN python3 -m pip install --no-cache-dir ./transformers[deepspeed] RUN python3 -m pip install --no-cache-dir torch-scatter -f https://data.pyg.org/whl/torch-$(python -c "from torch import version; print(version.__version__.split('+')[0])")+cpu.html -RUN python3 -m pip install --no-cache-dir torchvision git+https://github.com/facebookresearch/detectron2.git@5aeb252b194b93dc2879b4ac34bc51a31b5aee13 pytesseract https://github.com/kpu/kenlm/archive/master.zip +RUN python3 -m pip install --no-cache-dir torchvision git+https://github.com/facebookresearch/detectron2.git pytesseract https://github.com/kpu/kenlm/archive/master.zip RUN python3 -m pip install --no-cache-dir pytorch-quantization --extra-index-url https://pypi.ngc.nvidia.com RUN python3 -m pip install -U "itsdangerous<2.1.0" diff --git a/docker/transformers-pytorch-gpu/Dockerfile b/docker/transformers-pytorch-gpu/Dockerfile index c4a93b2e2c702c..668bec3e715d86 100644 --- a/docker/transformers-pytorch-gpu/Dockerfile +++ b/docker/transformers-pytorch-gpu/Dockerfile @@ -23,7 +23,7 @@ RUN [ ${#TORCH_AUDIO} -gt 0 ] && VERSION='torchaudio=='TORCH_AUDIO'.*' || VERSI RUN python3 -m pip uninstall -y tensorflow flax RUN python3 -m pip install --no-cache-dir torch-scatter -f https://data.pyg.org/whl/torch-$(python3 -c "from torch import version; print(version.__version__.split('+')[0])")+cu113.html -RUN python3 -m pip install --no-cache-dir git+https://github.com/facebookresearch/detectron2.git@5aeb252b194b93dc2879b4ac34bc51a31b5aee13 pytesseract https://github.com/kpu/kenlm/archive/master.zip +RUN python3 -m pip install --no-cache-dir git+https://github.com/facebookresearch/detectron2.git pytesseract https://github.com/kpu/kenlm/archive/master.zip RUN python3 -m pip install -U "itsdangerous<2.1.0" # When installing in editable mode, `transformers` is not recognized as a package. diff --git a/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py b/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py index c4e6ba0ab5aea4..be31af99d6dfd8 100755 --- a/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py +++ b/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py @@ -43,26 +43,12 @@ from .configuration_layoutlmv2 import LayoutLMv2Config -logger = logging.get_logger(__name__) - - # soft dependency if is_detectron2_available(): import detectron2 + from detectron2.modeling import META_ARCH_REGISTRY - try: - from detectron2.modeling import META_ARCH_REGISTRY - except ImportError: - # NOTE: This is a temporary fix because currently there are - # import problems when using detectron2 from master (see issues below) - # it's better to have a silent error here in case someone imports this file - # without using the model which without this hack would break. - logger.warning( - "The detectron2 import seems to be broken. See:" - "https://github.com/facebookresearch/detectron2/issues/4489 or" - "https://github.com/facebookresearch/detectron2/issues/4487" - ) - pass +logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "microsoft/layoutlmv2-base-uncased" _CONFIG_FOR_DOC = "LayoutLMv2Config" From 891704b3c21cef94ffc66786e99bcaad3fd492fb Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Tue, 23 Aug 2022 13:17:06 +0200 Subject: [PATCH 130/539] Removing warning of model type for `microsoft/tapex-base-finetuned-wtq` (#18711) and friends. --- .../pipelines/table_question_answering.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/src/transformers/pipelines/table_question_answering.py b/src/transformers/pipelines/table_question_answering.py index 25dcd320cf4f6a..a769815c61dece 100644 --- a/src/transformers/pipelines/table_question_answering.py +++ b/src/transformers/pipelines/table_question_answering.py @@ -16,14 +16,20 @@ if is_torch_available(): import torch - from ..models.auto.modeling_auto import MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING + from ..models.auto.modeling_auto import ( + MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, + MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, + ) if is_tf_available() and is_tensorflow_probability_available(): import tensorflow as tf import tensorflow_probability as tfp - from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING + from ..models.auto.modeling_tf_auto import ( + TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, + TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, + ) class TableQuestionAnsweringArgumentHandler(ArgumentHandler): @@ -100,9 +106,14 @@ def __init__(self, args_parser=TableQuestionAnsweringArgumentHandler(), *args, * self._args_parser = args_parser self.check_model_type( - TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING + dict( + TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING.items() + + TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.items() + ) if self.framework == "tf" - else MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING + else dict( + MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING.items() + MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.items() + ) ) self.aggregate = bool(getattr(self.model.config, "aggregation_labels", None)) and bool( From 438698085cfb92b6b7e9b3b94a226d3a32b77a85 Mon Sep 17 00:00:00 2001 From: SaulLu <55560583+SaulLu@users.noreply.github.com> Date: Tue, 23 Aug 2022 13:23:51 +0200 Subject: [PATCH 131/539] improve `add_tokens` docstring (#18687) * improve add_tokens documentation * format --- src/transformers/tokenization_utils_base.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/transformers/tokenization_utils_base.py b/src/transformers/tokenization_utils_base.py index 566fd3fbf92b05..fd8e1ee58551e4 100644 --- a/src/transformers/tokenization_utils_base.py +++ b/src/transformers/tokenization_utils_base.py @@ -915,10 +915,12 @@ def add_tokens( ) -> int: """ Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to - it with indices starting from length of the current vocabulary. + it with indices starting from length of the current vocabulary and and will be isolated before the tokenization + algorithm is applied. Added tokens and tokens from the vocabulary of the tokenization algorithm are therefore + not treated in the same way. - Note,None When adding new tokens to the vocabulary, you should make sure to also resize the token embedding - matrix of the model so that its embedding matrix matches the tokenizer. + Note, when adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix + of the model so that its embedding matrix matches the tokenizer. In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method. From 6faf283288ce3390281ad8c1d37ccb13f2d03990 Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Tue, 23 Aug 2022 15:38:59 +0100 Subject: [PATCH 132/539] CLI: Don't check the model head when there is no model head (#18733) --- src/transformers/commands/pt_to_tf.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/src/transformers/commands/pt_to_tf.py b/src/transformers/commands/pt_to_tf.py index 57c03c2746dbf7..fdd423430ee212 100644 --- a/src/transformers/commands/pt_to_tf.py +++ b/src/transformers/commands/pt_to_tf.py @@ -286,7 +286,12 @@ def run(self): crossload_differences = self.find_pt_tf_differences(pt_outputs, tf_from_pt_outputs) output_differences = {k: v for k, v in crossload_differences.items() if "hidden" not in k} hidden_differences = {k: v for k, v in crossload_differences.items() if "hidden" in k} - max_crossload_output_diff = max(output_differences.values()) + if len(output_differences) == 0 and architectures is not None: + raise ValueError( + f"Something went wrong -- the config file has architectures ({architectures}), but no model head" + " output was found. All outputs start with 'hidden'" + ) + max_crossload_output_diff = max(output_differences.values()) if output_differences else 0.0 max_crossload_hidden_diff = max(hidden_differences.values()) if max_crossload_output_diff > MAX_ERROR or max_crossload_hidden_diff > self._max_hidden_error: raise ValueError( @@ -310,7 +315,12 @@ def run(self): conversion_differences = self.find_pt_tf_differences(pt_outputs, tf_outputs) output_differences = {k: v for k, v in conversion_differences.items() if "hidden" not in k} hidden_differences = {k: v for k, v in conversion_differences.items() if "hidden" in k} - max_conversion_output_diff = max(output_differences.values()) + if len(output_differences) == 0 and architectures is not None: + raise ValueError( + f"Something went wrong -- the config file has architectures ({architectures}), but no model head" + " output was found. All outputs start with 'hidden'" + ) + max_conversion_output_diff = max(output_differences.values()) if output_differences else 0.0 max_conversion_hidden_diff = max(hidden_differences.values()) if max_conversion_output_diff > MAX_ERROR or max_conversion_hidden_diff > self._max_hidden_error: raise ValueError( From c12dbdc2462900b6dda20b61d5cf4c2faf989ba4 Mon Sep 17 00:00:00 2001 From: Mishig Davaadorj Date: Wed, 24 Aug 2022 10:37:52 +0200 Subject: [PATCH 133/539] Update perf_infer_gpu_many.mdx (#18744) --- docs/source/en/perf_infer_gpu_many.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/en/perf_infer_gpu_many.mdx b/docs/source/en/perf_infer_gpu_many.mdx index 2f29c3c77bc343..b3331d1f12a34b 100644 --- a/docs/source/en/perf_infer_gpu_many.mdx +++ b/docs/source/en/perf_infer_gpu_many.mdx @@ -14,6 +14,6 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o This document contains information on how to efficiently infer on a multiple GPUs. -Note: A multi GPU setup can use the majority of the strategies described in the [single GPU section](perf infer gpu one). You must be aware of simple techniques, though, that can be used for a better usage. +Note: A multi GPU setup can use the majority of the strategies described in the [single GPU section](./perf_infer_gpu_one). You must be aware of simple techniques, though, that can be used for a better usage. From a442884b8790f99fed8fa8500355ae4a2b559967 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Constantin=20H=C3=BCtterer?= <60255589+constantin-huetterer@users.noreply.github.com> Date: Wed, 24 Aug 2022 11:07:17 +0200 Subject: [PATCH 134/539] Add minor doc-string change to include hp_name param in hyperparameter_search (#18700) * Add minor doc-string change to include hp_name * fix: missing type-information for kwargs * fix: missing white-space in hyperparameter_search doc-strings --- src/transformers/trainer.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index 64d5a3fadf4d6d..0fe0c1b84ee7f2 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -2306,13 +2306,15 @@ def hyperparameter_search( method. Will default to [`~trainer_utils.default_compute_objective`]. n_trials (`int`, *optional*, defaults to 100): The number of trial runs to test. - direction(`str`, *optional*, defaults to `"minimize"`): + direction (`str`, *optional*, defaults to `"minimize"`): Whether to optimize greater or lower objects. Can be `"minimize"` or `"maximize"`, you should pick `"minimize"` when optimizing the validation loss, `"maximize"` when optimizing one or several metrics. - backend(`str` or [`~training_utils.HPSearchBackend`], *optional*): + backend (`str` or [`~training_utils.HPSearchBackend`], *optional*): The backend to use for hyperparameter search. Will default to optuna or Ray Tune or SigOpt, depending on which one is installed. If all are installed, will default to optuna. - kwargs: + hp_name (`Callable[["optuna.Trial"], str]]`, *optional*): + A function that defines the trial/run name. Will default to None. + kwargs (`Dict[str, Any]`, *optional*): Additional keyword arguments passed along to `optuna.create_study` or `ray.tune.run`. For more information see: From cecf9f9b2748c760287473ef9c1b61bddc787f58 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Wed, 24 Aug 2022 11:38:03 +0200 Subject: [PATCH 135/539] fix pipeline_tutorial.mdx doctest (#18717) Co-authored-by: ydshieh --- docs/source/en/pipeline_tutorial.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/en/pipeline_tutorial.mdx b/docs/source/en/pipeline_tutorial.mdx index 95585b64359f49..650dbd9520abb2 100644 --- a/docs/source/en/pipeline_tutorial.mdx +++ b/docs/source/en/pipeline_tutorial.mdx @@ -167,5 +167,5 @@ Create a pipeline for `vqa` and pass it the image and question: >>> preds = vqa(image=image, question=question) >>> preds = [{"score": round(pred["score"], 4), "answer": pred["answer"]} for pred in preds] >>> preds -[{'score': 0.9112, 'answer': 'snow'}, {'score': 0.8796, 'answer': 'in snow'}, {'score': 0.6717, 'answer': 'outside'}, {'score': 0.0291, 'answer': 'on ground'}, {'score': 0.027, 'answer': 'ground'}] -``` \ No newline at end of file +[{'score': 0.911, 'answer': 'snow'}, {'score': 0.8786, 'answer': 'in snow'}, {'score': 0.6714, 'answer': 'outside'}, {'score': 0.0293, 'answer': 'on ground'}, {'score': 0.0272, 'answer': 'ground'}] +``` From c72d7d91bf4899760725793421eff9da640c8527 Mon Sep 17 00:00:00 2001 From: Daniel Stancl <46073029+stancld@users.noreply.github.com> Date: Wed, 24 Aug 2022 11:51:05 +0200 Subject: [PATCH 136/539] Add TF implementation of `XGLMModel` (#16543) * Add TFXGLM models * Add todo: self.supports_xla_generation = False Co-authored-by: Daniel Stancl Co-authored-by: Daniel Stancl Co-authored-by: Joao Gante Co-authored-by: Daniel Co-authored-by: Patrick von Platen --- docs/source/en/index.mdx | 2 +- docs/source/en/model_doc/xglm.mdx | 10 + src/transformers/__init__.py | 14 + .../models/auto/modeling_tf_auto.py | 2 + src/transformers/models/xglm/__init__.py | 28 + .../models/xglm/modeling_tf_xglm.py | 1000 +++++++++++++++++ src/transformers/utils/dummy_tf_objects.py | 24 + tests/models/xglm/test_modeling_tf_xglm.py | 284 +++++ 8 files changed, 1363 insertions(+), 1 deletion(-) create mode 100644 src/transformers/models/xglm/modeling_tf_xglm.py create mode 100644 tests/models/xglm/test_modeling_tf_xglm.py diff --git a/docs/source/en/index.mdx b/docs/source/en/index.mdx index 257eba8171ed1c..17c04376780afe 100644 --- a/docs/source/en/index.mdx +++ b/docs/source/en/index.mdx @@ -312,7 +312,7 @@ Flax), PyTorch, and/or TensorFlow. | Wav2Vec2 | ✅ | ❌ | ✅ | ✅ | ✅ | | Wav2Vec2-Conformer | ❌ | ❌ | ✅ | ❌ | ❌ | | WavLM | ❌ | ❌ | ✅ | ❌ | ❌ | -| XGLM | ✅ | ✅ | ✅ | ❌ | ✅ | +| XGLM | ✅ | ✅ | ✅ | ✅ | ✅ | | XLM | ✅ | ❌ | ✅ | ✅ | ❌ | | XLM-ProphetNet | ✅ | ❌ | ✅ | ❌ | ❌ | | XLM-RoBERTa | ✅ | ✅ | ✅ | ✅ | ✅ | diff --git a/docs/source/en/model_doc/xglm.mdx b/docs/source/en/model_doc/xglm.mdx index b8c395ce021133..e35bab25f89c4e 100644 --- a/docs/source/en/model_doc/xglm.mdx +++ b/docs/source/en/model_doc/xglm.mdx @@ -64,6 +64,16 @@ This model was contributed by [Suraj](https://huggingface.co/valhalla). The orig [[autodoc]] XGLMForCausalLM - forward +## TFXGLMModel + +[[autodoc]] TFXGLMModel + - call + +## TFXGLMForCausalLM + +[[autodoc]] TFXGLMForCausalLM + - call + ## FlaxXGLMModel [[autodoc]] FlaxXGLMModel diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index d6444e0844ff54..3281d266a2f3ce 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -2567,6 +2567,14 @@ "TFWav2Vec2PreTrainedModel", ] ) + _import_structure["models.xglm"].extend( + [ + "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFXGLMForCausalLM", + "TFXGLMModel", + "TFXGLMPreTrainedModel", + ] + ) _import_structure["models.xlm"].extend( [ "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST", @@ -4954,6 +4962,12 @@ TFWav2Vec2Model, TFWav2Vec2PreTrainedModel, ) + from .models.xglm import ( + TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, + TFXGLMForCausalLM, + TFXGLMModel, + TFXGLMPreTrainedModel, + ) from .models.xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, diff --git a/src/transformers/models/auto/modeling_tf_auto.py b/src/transformers/models/auto/modeling_tf_auto.py index 6f9b15c131d60e..359e1f05c47b55 100644 --- a/src/transformers/models/auto/modeling_tf_auto.py +++ b/src/transformers/models/auto/modeling_tf_auto.py @@ -77,6 +77,7 @@ ("vit", "TFViTModel"), ("vit_mae", "TFViTMAEModel"), ("wav2vec2", "TFWav2Vec2Model"), + ("xglm", "TFXGLMModel"), ("xlm", "TFXLMModel"), ("xlm-roberta", "TFXLMRobertaModel"), ("xlnet", "TFXLNetModel"), @@ -161,6 +162,7 @@ ("roberta", "TFRobertaForCausalLM"), ("roformer", "TFRoFormerForCausalLM"), ("transfo-xl", "TFTransfoXLLMHeadModel"), + ("xglm", "TFXGLMForCausalLM"), ("xlm", "TFXLMWithLMHeadModel"), ("xlnet", "TFXLNetLMHeadModel"), ] diff --git a/src/transformers/models/xglm/__init__.py b/src/transformers/models/xglm/__init__.py index 2ab60e4cb4bbc9..096886e5bd329e 100644 --- a/src/transformers/models/xglm/__init__.py +++ b/src/transformers/models/xglm/__init__.py @@ -23,6 +23,7 @@ _LazyModule, is_flax_available, is_sentencepiece_available, + is_tf_available, is_tokenizers_available, is_torch_available, ) @@ -73,6 +74,20 @@ ] +try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_tf_xglm"] = [ + "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFXGLMForCausalLM", + "TFXGLMModel", + "TFXGLMPreTrainedModel", + ] + + if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig @@ -108,6 +123,19 @@ else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel + try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_tf_xglm import ( + TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, + TFXGLMForCausalLM, + TFXGLMModel, + TFXGLMPreTrainedModel, + ) + else: import sys diff --git a/src/transformers/models/xglm/modeling_tf_xglm.py b/src/transformers/models/xglm/modeling_tf_xglm.py new file mode 100644 index 00000000000000..1b6828b09817ad --- /dev/null +++ b/src/transformers/models/xglm/modeling_tf_xglm.py @@ -0,0 +1,1000 @@ +# coding=utf-8 +# Copyright 2021 The Fairseq Authors The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" TF 2.0 XGLM model.""" + + +import math +import random +from typing import Any, Optional, Tuple, Union + +import numpy as np +import tensorflow as tf + +from ...activations_tf import get_tf_activation + +# Public API +from ...file_utils import ( + DUMMY_INPUTS, + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + replace_return_docstrings, +) +from ...modeling_tf_outputs import TFBaseModelOutputWithPastAndCrossAttentions, TFCausalLMOutputWithCrossAttentions +from ...modeling_tf_utils import ( + TFCausalLanguageModelingLoss, + TFModelInputType, + TFPreTrainedModel, + TFSharedEmbeddings, + get_initializer, + keras_serializable, + unpack_inputs, +) +from ...tf_utils import shape_list, stable_softmax +from ...utils import logging +from .configuration_xglm import XGLMConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "facebook/xglm-564M" +_CONFIG_FOR_DOC = "XGLMConfig" +_TOKENIZER_FOR_DOC = "XGLMTokenizer" + + +TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "facebook/xglm-564M", + # See all XGLM models at https://huggingface.co/models?filter=xglm +] + + +LARGE_NEGATIVE = -1e8 + + +def create_sinusiodal_positions(num_positions: int, embedding_dim: int, padding_idx: Optional[int]) -> tf.Tensor: + half_dim = embedding_dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = tf.exp(tf.range(half_dim, dtype=tf.float32) * -emb) + emb = tf.expand_dims(tf.range(num_positions, dtype=tf.float32), axis=1) * tf.expand_dims(emb, axis=0) + emb = tf.reshape(tf.concat([tf.sin(emb), tf.cos(emb)], axis=1), (num_positions, -1)) + if embedding_dim % 2 == 1: + # zero pad + emb = tf.concat([emb, tf.zeros((num_positions, 1))], axis=1) + if padding_idx is not None: + _padding_mask = tf.concat( + [ + tf.ones((padding_idx, shape_list(emb)[1])), + tf.zeros((1, shape_list(emb)[1])), + tf.ones((shape_list(emb)[0] - padding_idx - 1, shape_list(emb)[1])), + ], + axis=0, + ) + emb *= _padding_mask + + return tf.Variable(emb, trainable=False, name="model.embed_positions.weights") + + +def _create_position_ids_from_input_ids( + input_ids: tf.Tensor, past_key_values_length: int, padding_idx: Optional[int] +) -> tf.Tensor: + """ + Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols + are ignored. This is modified from fairseq's `utils.make_positions`. + """ + # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. + mask = tf.where(input_ids != padding_idx, 1, 0) + incremental_indices = (tf.cast(tf.cumsum(mask, axis=1), dtype=mask.dtype) + past_key_values_length) * mask + return tf.cast(incremental_indices, dtype=tf.int64) + padding_idx + + +def _create_position_ids_from_inputs_embeds( + inputs_embeds: tf.Tensor, past_key_values_length: int, padding_idx: Optional[int] +) -> tf.Tensor: + """ + Args: + We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. + inputs_embeds: tf.Tensor + Returns: tf.Tensor + """ + input_shape = shape_list(inputs_embeds)[:-1] + sequence_length = input_shape[1] + + position_ids = tf.range(padding_idx + 1, sequence_length + padding_idx + 1, dtype=tf.int64) + + return tf.broadcast_to(tf.expand_dims(position_ids, axis=0), input_shape) + past_key_values_length + + +# Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask +def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0): + """ + Make causal mask used for bi-directional self-attention. + """ + bsz = input_ids_shape[0] + tgt_len = input_ids_shape[1] + mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE + mask_cond = tf.range(shape_list(mask)[-1]) + + mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask) + + if past_key_values_length > 0: + mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1) + + return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1)) + + +# Copied from transformers.models.bart.modeling_tf_bart._expand_mask +def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None, past_key_values_length: int = 0): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. + """ + src_len = shape_list(mask)[1] + tgt_len = tgt_len if tgt_len is not None else src_len + one_cst = tf.constant(1.0) + mask = tf.cast(mask, dtype=one_cst.dtype) + expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) + + return (one_cst - expanded_mask) * LARGE_NEGATIVE + + +# Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->XGLM +class TFXGLMAttention(tf.keras.layers.Layer): + """Multi-headed attention from "Attention Is All You Need""" + + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + is_decoder: bool = False, + bias: bool = True, + **kwargs, + ): + super().__init__(**kwargs) + self.embed_dim = embed_dim + + self.num_heads = num_heads + self.dropout = tf.keras.layers.Dropout(dropout) + self.head_dim = embed_dim // num_heads + if (self.head_dim * num_heads) != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" + f" and `num_heads`: {num_heads})." + ) + self.scaling = self.head_dim**-0.5 + self.is_decoder = is_decoder + + self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj") + self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj") + self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj") + self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj") + + def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): + return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3)) + + def call( + self, + hidden_states: tf.Tensor, + key_value_states: Optional[tf.Tensor] = None, + past_key_value: Optional[Tuple[Tuple[tf.Tensor]]] = None, + attention_mask: Optional[tf.Tensor] = None, + layer_head_mask: Optional[tf.Tensor] = None, + training: Optional[bool] = False, + ) -> Tuple[tf.Tensor, Optional[tf.Tensor]]: + """Input shape: Batch x Time x Channel""" + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + bsz, tgt_len, embed_dim = shape_list(hidden_states) + + # get query proj + query_states = self.q_proj(hidden_states) * self.scaling + # get key, value proj + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_states = past_key_value[0] + value_states = past_key_value[1] + elif is_cross_attention: + # cross_attentions + key_states = self._shape(self.k_proj(key_value_states), -1, bsz) + value_states = self._shape(self.v_proj(key_value_states), -1, bsz) + elif past_key_value is not None: + # reuse k, v, self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + key_states = tf.concat([past_key_value[0], key_states], axis=2) + value_states = tf.concat([past_key_value[1], value_states], axis=2) + else: + # self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + if self.is_decoder: + # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states, value_states) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) + key_states = tf.reshape(key_states, proj_shape) + value_states = tf.reshape(value_states, proj_shape) + + src_len = shape_list(key_states)[1] + attn_weights = tf.matmul(query_states, key_states, transpose_b=True) + + # The tf.debugging asserts are not compliant with XLA then they + # have to be disabled in other modes than eager. + if tf.executing_eagerly(): + tf.debugging.assert_equal( + shape_list(attn_weights), + [bsz * self.num_heads, tgt_len, src_len], + message=( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {shape_list(attn_weights)}" + ), + ) + + if attention_mask is not None: + # The tf.debugging asserts are not compliant with XLA then they + # have to be disabled in other modes than eager. + if tf.executing_eagerly(): + tf.debugging.assert_equal( + shape_list(attention_mask), + [bsz, 1, tgt_len, src_len], + message=( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" + f" {shape_list(attention_mask)}" + ), + ) + + attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) + attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask + attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) + + attn_weights = stable_softmax(attn_weights, axis=-1) + + if layer_head_mask is not None: + # The tf.debugging asserts are not compliant with XLA then they + # have to be disabled in other modes than eager. + if tf.executing_eagerly(): + tf.debugging.assert_equal( + shape_list(layer_head_mask), + [self.num_heads], + message=( + f"Head mask for a single layer should be of size {(self.num_heads)}, but is" + f" {shape_list(layer_head_mask)}" + ), + ) + + attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( + attn_weights, (bsz, self.num_heads, tgt_len, src_len) + ) + attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) + + attn_probs = self.dropout(attn_weights, training=training) + attn_output = tf.matmul(attn_probs, value_states) + + # The tf.debugging asserts are not compliant with XLA then they + # have to be disabled in other modes than eager. + if tf.executing_eagerly(): + tf.debugging.assert_equal( + shape_list(attn_output), + [bsz * self.num_heads, tgt_len, self.head_dim], + message=( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {shape_list(attn_output)}" + ), + ) + + attn_output = tf.transpose( + tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) + ) + attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) + + attn_output = self.out_proj(attn_output) + attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + + return attn_output, attn_weights, past_key_value + + +class TFXGLMDecoderLayer(tf.keras.layers.Layer): + def __init__(self, config: XGLMConfig, **kwargs: Any) -> None: + super().__init__(**kwargs) + self.embed_dim = config.d_model + self.self_attn = TFXGLMAttention( + embed_dim=self.embed_dim, + num_heads=config.attention_heads, + dropout=config.attention_dropout, + is_decoder=True, + name="self_attn", + ) + self.dropout = tf.keras.layers.Dropout(config.dropout) + self.activation_fn = get_tf_activation(config.activation_function) + self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout) + + if config.add_cross_attention: + self.encoder_attn = TFXGLMAttention( + embed_dim=self.embed_dim, + num_heads=config.attention_heads, + dropout=config.attention_dropout, + is_decoder=True, + name="encoder_attn", + ) + self.encoder_attn_layer_norm = tf.keras.layers.LayerNormalization( + epsilon=1e-5, name="encoder_attn_layer_norm" + ) + + self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") + self.fc1 = tf.keras.layers.Dense(config.ffn_dim, name="fc1") + self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2") + self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") + + # Copied from transformers.models.mbart.modeling_tf_mbart.TFMBartDecoderLayer.call + def call( + self, + hidden_states: tf.Tensor, + attention_mask: Optional[tf.Tensor] = None, + encoder_hidden_states: Optional[tf.Tensor] = None, + encoder_attention_mask: Optional[tf.Tensor] = None, + layer_head_mask: Optional[tf.Tensor] = None, + cross_attn_layer_head_mask: Optional[tf.Tensor] = None, + past_key_value: Optional[Tuple[tf.Tensor]] = None, + training: Optional[bool] = False, + ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]: + """ + Args: + hidden_states (`tf.Tensor`): input to the layer of shape *(seq_len, batch, embed_dim)* + attention_mask (`tf.Tensor`): attention mask of size + *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. + encoder_hidden_states (`tf.Tensor`): + cross attention input to the layer of shape *(seq_len, batch, embed_dim)* + encoder_attention_mask (`tf.Tensor`): encoder attention mask of size + *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. + layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size + *(decoder_attention_heads,)* + cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module. + *(decoder_attention_heads,)* + past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states + """ + residual = hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + # Self Attention + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + # add present self-attn cache to positions 1,2 of present_key_value tuple + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + past_key_value=self_attn_past_key_value, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + ) + hidden_states = self.dropout(hidden_states, training=training) + hidden_states = residual + hidden_states + + # Cross-Attention Block + cross_attn_present_key_value = None + cross_attn_weights = None + if encoder_hidden_states is not None: + residual = hidden_states + hidden_states = self.encoder_attn_layer_norm(hidden_states) + + # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( + hidden_states=hidden_states, + key_value_states=encoder_hidden_states, + attention_mask=encoder_attention_mask, + layer_head_mask=cross_attn_layer_head_mask, + past_key_value=cross_attn_past_key_value, + ) + hidden_states = self.dropout(hidden_states, training=training) + hidden_states = residual + hidden_states + + # add cross-attn to positions 3,4 of present_key_value tuple + present_key_value = present_key_value + cross_attn_present_key_value + + # Fully Connected + residual = hidden_states + hidden_states = self.final_layer_norm(hidden_states) + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = self.activation_dropout(hidden_states, training=training) + hidden_states = self.fc2(hidden_states) + hidden_states = self.dropout(hidden_states, training=training) + hidden_states = residual + hidden_states + + return ( + hidden_states, + self_attn_weights, + cross_attn_weights, + present_key_value, + ) + + +@keras_serializable +class TFXGLMMainLayer(tf.keras.layers.Layer): + config_class = XGLMConfig + + def __init__( + self, config: XGLMConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, *inputs, **kwargs: Any + ) -> None: + super().__init__(*inputs, **kwargs) + + self.config = config + self.padding_idx = config.pad_token_id + self.max_target_positions = config.max_position_embeddings + self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 + + if embed_tokens is not None: + self.embed_tokens = embed_tokens + else: + self.embed_tokens = TFSharedEmbeddings( + config.vocab_size, config.d_model, self.padding_idx, name="embed_tokens" + ) + + self.offset = 2 + self._embed_positions_weights = create_sinusiodal_positions( + num_positions=config.max_position_embeddings + self.offset, + embedding_dim=config.d_model, + padding_idx=config.pad_token_id, + ) + + self.dropout = tf.keras.layers.Dropout(config.dropout) + self.layers = [TFXGLMDecoderLayer(config, name=f"layers.{i}") for i in range(config.num_layers)] + self.layerdrop = config.layerdrop + self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm") + + def get_input_embeddings(self) -> TFSharedEmbeddings: + return self.embed_tokens + + def set_input_embeddings(self, value: TFSharedEmbeddings) -> None: + self.embed_tokens = value + + def _prepare_decoder_attention_mask( + self, + attention_mask: Optional[tf.Tensor], + input_shape: tf.TensorShape, + past_key_values_length: int, + ) -> tf.Tensor: + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + combined_attention_mask: Optional[tf.Tensor] = None + if input_shape[-1] > 1: + combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length) + + if attention_mask is not None: + expand_attention_mask = _expand_mask(attention_mask, tgt_len=input_shape[-1]) + combined_attention_mask = ( + expand_attention_mask + if combined_attention_mask is None + else expand_attention_mask + combined_attention_mask + ) + + return combined_attention_mask + + def embed_positions( + self, + input_ids: Optional[TFModelInputType] = None, + inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, + past_key_values_length: Optional[int] = None, + ) -> tf.Tensor: + if input_ids is not None: + position_ids = _create_position_ids_from_input_ids(input_ids, past_key_values_length, self.padding_idx) + else: + position_ids = _create_position_ids_from_inputs_embeds( + inputs_embeds, past_key_values_length, self.padding_idx + ) + + positions = tf.gather(self._embed_positions_weights, position_ids, axis=0) + return positions + + @unpack_inputs + def call( + self, + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + encoder_hidden_states: Optional[Union[np.ndarray, tf.Tensor]] = None, + encoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + cross_attn_head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, + inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: Optional[bool] = False, + **kwargs: Any, + ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = shape_list(input_ids) + input_ids = tf.reshape(input_ids, (-1, input_shape[-1])) + elif inputs_embeds is not None: + input_shape = shape_list(inputs_embeds)[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale + + attention_mask = self._prepare_decoder_attention_mask(attention_mask, input_shape, past_key_values_length) + + # expand encoder attention mask + if encoder_hidden_states is not None and encoder_attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1]) + + # embed positions + positions = self.embed_positions(input_ids, inputs_embeds, past_key_values_length) + + hidden_states = tf.cast(inputs_embeds, dtype=tf.float32) + positions + + hidden_states = self.dropout(hidden_states, training=training) + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None + next_decoder_cache = () if use_cache else None + + # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired + # The tf.debugging asserts are not compliant with XLA then they + # have to be disabled in other modes than eager. + for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]: + if attn_mask is not None and tf.executing_eagerly(): + tf.debugging.assert_equal( + shape_list(attn_mask)[0], + len(self.layers), + message=( + f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for" + f" {shape_list(attn_mask)[0]}." + ), + ) + + for idx, decoder_layer in enumerate(self.layers): + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + if output_hidden_states: + all_hidden_states += (hidden_states,) + + dropout_probability = random.uniform(0, 1) + if training and (dropout_probability < self.layerdrop): + continue + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer( + hidden_states, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + layer_head_mask=(head_mask[idx] if head_mask is not None else None), + cross_attn_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None), + past_key_value=past_key_value, + ) + + if use_cache: + next_decoder_cache += (present_key_value,) + + if output_attentions: + all_self_attns += (layer_self_attn,) + + if encoder_hidden_states is not None: + all_cross_attentions += (layer_cross_attn,) + + hidden_states = self.layer_norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + if not return_dict: + return tuple( + v + for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] + if v is not None + ) + return TFBaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + cross_attentions=all_cross_attentions, + ) + + +class TFXGLMPreTrainedModel(TFPreTrainedModel): + config_class = XGLMConfig + base_model_prefix = "model" + + @property + def dummy_inputs(self): + pad_token = 1 + input_ids = tf.cast(tf.convert_to_tensor(DUMMY_INPUTS), tf.int32) + dummy_inputs = { + "input_ids": input_ids, + "attention_mask": tf.math.not_equal(input_ids, pad_token), + } + return dummy_inputs + + @tf.function( + input_signature=[ + { + "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), + } + ] + ) + def serving(self, inputs): + output = self.call(inputs) + + return self.serving_output(output) + + +XGLM_START_DOCSTRING = r""" + This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it + as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and + behavior. + + + + TF 2.0 models accepts two formats as inputs: + + - having all inputs as keyword arguments (like PyTorch models), or + - having all inputs as a list, tuple or dict in the first positional arguments. + + This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the + tensors in the first argument of the model call function: `model(inputs)`. + + If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the + first positional argument : + + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` + - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: + `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` + - a dictionary with one or several input Tensors associated to the input names given in the docstring: + `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + + + + Args: + config ([`XGLMConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. +""" + +XGLM_INPUTS_DOCSTRING = r""" + Args: + input_ids (`tf.Tensor` of shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`XGLMTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`tf.Tensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of + the decoder. + encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): + Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values + selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + head_mask (`tf.Tensor` of shape `(num_layers, attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + cross_attn_head_mask (`tf.Tensor` of shape `(num_layers, attention_heads)`, *optional*): + Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.num_layers`) + contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + use_cache (`bool`, *optional*, defaults to `True`): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). Set to `False` during training, `True` during generation + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the + config will be used instead. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. This argument can be used only in eager mode, in graph mode the value in the config will be + used instead. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in + eager mode, in graph mode the value will always be set to True. + training (`bool`, *optional*, defaults to `False`): + Whether or not to use the model in training mode (some modules like dropout modules have different + behaviors between training and evaluation). +""" + + +@add_start_docstrings( + "The bare XGLM Model transformer outputting raw hidden-states without any specific head on top.", + XGLM_START_DOCSTRING, +) +class TFXGLMModel(TFXGLMPreTrainedModel): + """ + Transformer decoder consisting of *config.num_layers* layers. Each layer is a [`TFXGLMDecoderLayer`] + + Args: + config: XGLMConfig + embed_tokens: [TFSharedEmbeddings]: output embedding + """ + + def __init__( + self, config: XGLMConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, *inputs: Any, **kwargs: Any + ) -> None: + super().__init__(config, *inputs, **kwargs) + + self.model = TFXGLMMainLayer(config, embed_tokens=embed_tokens, name="model") + + @unpack_inputs + @add_start_docstrings_to_model_forward(XGLM_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFBaseModelOutputWithPastAndCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def call( + self, + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + encoder_hidden_states: Optional[Union[np.ndarray, tf.Tensor]] = None, + encoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + cross_attn_head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, + inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: Optional[bool] = False, + **kwargs: Any, + ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: + + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + head_mask=head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + return outputs + + def serving_output(self, output): + pkv = tf.convert_to_tensor(output.past_key_values) if self.config.use_cache else None + hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None + attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None + cross_attns = ( + tf.convert_to_tensor(output.cross_attentions) + if self.config.output_attentions and self.config.add_cross_attention + else None + ) + + return TFBaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=output.hidden_states, + past_key_values=pkv, + hidden_states=hs, + attentions=attns, + cross_attentions=cross_attns, + ) + + +@add_start_docstrings( + """ + The XGLM Model transformer with a language modeling head on top (linear layer with weights tied to the input + embeddings). + """, + XGLM_START_DOCSTRING, +) +class TFXGLMForCausalLM(TFXGLMPreTrainedModel, TFCausalLanguageModelingLoss): + base_model_prefix = "model" + _keys_to_ignore_on_load_missing = [ + r"model.embed_positions.weights", + r"lm_head.weight", + ] + _keys_to_ignore_on_save = [ + r"model.embed_positions.weights", + ] + + def __init__( + self, config: XGLMConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, *inputs: Any, **kwargs: Any + ) -> None: + super().__init__(config, *inputs, **kwargs) + + self.model = TFXGLMMainLayer(config, embed_tokens=embed_tokens, name="model") + self.lm_head = tf.keras.layers.Dense( + config.vocab_size, + use_bias=False, + kernel_initializer=get_initializer(config.init_std), + name="lm_head", + ) + + # TODO (Joao): investigate why XGLM has numerical issues in XLA generate + self.supports_xla_generation = False + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def prepare_inputs_for_generation(self, inputs, past=None, use_cache=None, **kwargs): + # only last token for inputs_ids if past is defined in kwargs + if past: + inputs = tf.expand_dims(inputs[:, -1], -1) + + attention_mask = kwargs.get("attention_mask", None) + + return { + "input_ids": inputs, + "attention_mask": attention_mask, + "past": past, + "use_cache": use_cache, + } + + @unpack_inputs + @add_start_docstrings_to_model_forward(XGLM_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=TFCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFCausalLMOutputWithCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def call( + self, + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + encoder_hidden_states: Optional[Union[np.ndarray, tf.Tensor]] = None, + encoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + cross_attn_head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, + inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, + labels: Optional[Union[np.ndarray, tf.Tensor]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: Optional[bool] = False, + **kwargs: Any, + ) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]: + r""" + labels (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set + `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` + are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` + """ + + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + head_mask=head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + hidden_states = outputs[0] + lm_logits = self.lm_head(hidden_states) + + loss = None + if labels is not None: + # shift labels to the left and cut last logit token + shifted_logits = lm_logits[:, :-1] + labels = labels[:, 1:] + loss = self.hf_compute_loss(labels, shifted_logits) + + if not return_dict: + output = (lm_logits,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return TFCausalLMOutputWithCrossAttentions( + loss=loss, + logits=lm_logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def serving_output(self, output): + pkv = tf.convert_to_tensor(output.past_key_values) if self.config.use_cache else None + hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None + attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None + cross_attns = ( + tf.convert_to_tensor(output.cross_attentions) + if self.config.output_attentions and self.config.add_cross_attention + else None + ) + + return TFCausalLMOutputWithCrossAttentions( + loss=output.loss, + logits=output.logits, + past_key_values=pkv, + hidden_states=hs, + attentions=attns, + cross_attentions=cross_attns, + ) + + @staticmethod + def _reorder_cache(past, beam_idx): + reordered_past = () + for layer_past in past: + reordered_past += (tuple(tf.gather(past_state, beam_idx, axis=0) for past_state in layer_past),) + return reordered_past diff --git a/src/transformers/utils/dummy_tf_objects.py b/src/transformers/utils/dummy_tf_objects.py index fec5ffe700808a..5f8124ae558465 100644 --- a/src/transformers/utils/dummy_tf_objects.py +++ b/src/transformers/utils/dummy_tf_objects.py @@ -2270,6 +2270,30 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) +TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFXGLMForCausalLM(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXGLMModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFXGLMPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/tests/models/xglm/test_modeling_tf_xglm.py b/tests/models/xglm/test_modeling_tf_xglm.py new file mode 100644 index 00000000000000..b6387901dc955b --- /dev/null +++ b/tests/models/xglm/test_modeling_tf_xglm.py @@ -0,0 +1,284 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +from transformers import XGLMConfig, XGLMTokenizer, is_tf_available +from transformers.testing_utils import require_tf, slow + +from ...test_configuration_common import ConfigTester +from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask + + +if is_tf_available(): + import tensorflow as tf + + from transformers.models.xglm.modeling_tf_xglm import ( + TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, + TFXGLMForCausalLM, + TFXGLMModel, + ) + + +@require_tf +class TFXGLMModelTester: + config_cls = XGLMConfig + config_updates = {} + hidden_act = "gelu" + + def __init__( + self, + parent, + batch_size=14, + seq_length=7, + is_training=True, + use_input_mask=True, + use_labels=True, + vocab_size=99, + d_model=32, + num_hidden_layers=5, + num_attention_heads=4, + ffn_dim=37, + activation_function="gelu", + activation_dropout=0.1, + attention_dropout=0.1, + max_position_embeddings=512, + initializer_range=0.02, + ): + self.parent = parent + self.batch_size = batch_size + self.seq_length = seq_length + self.is_training = is_training + self.use_input_mask = use_input_mask + self.use_labels = use_labels + self.vocab_size = vocab_size + self.hidden_size = d_model + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.ffn_dim = ffn_dim + self.activation_function = activation_function + self.activation_dropout = activation_dropout + self.attention_dropout = attention_dropout + self.max_position_embeddings = max_position_embeddings + self.initializer_range = initializer_range + self.scope = None + self.bos_token_id = 0 + self.eos_token_id = 2 + self.pad_token_id = 1 + + def get_large_model_config(self): + return XGLMConfig.from_pretrained("facebook/xglm-564M") + + def prepare_config_and_inputs(self): + input_ids = tf.clip_by_value( + ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3 + ) + + input_mask = None + if self.use_input_mask: + input_mask = random_attention_mask([self.batch_size, self.seq_length]) + + config = self.get_config() + + head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2) + + return ( + config, + input_ids, + input_mask, + head_mask, + ) + + def get_config(self): + return XGLMConfig( + vocab_size=self.vocab_size, + d_model=self.hidden_size, + num_layers=self.num_hidden_layers, + attention_heads=self.num_attention_heads, + ffn_dim=self.ffn_dim, + activation_function=self.activation_function, + activation_dropout=self.activation_dropout, + attention_dropout=self.attention_dropout, + max_position_embeddings=self.max_position_embeddings, + initializer_range=self.initializer_range, + use_cache=True, + bos_token_id=self.bos_token_id, + eos_token_id=self.eos_token_id, + pad_token_id=self.pad_token_id, + return_dict=True, + ) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + + ( + config, + input_ids, + input_mask, + head_mask, + ) = config_and_inputs + + inputs_dict = { + "input_ids": input_ids, + "head_mask": head_mask, + } + + return config, inputs_dict + + +@require_tf +class TFXGLMModelTest(TFModelTesterMixin, unittest.TestCase): + + all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () + all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else () + test_onnx = False + test_missing_keys = False + test_pruning = False + + def setUp(self): + self.model_tester = TFXGLMModelTester(self) + self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37) + + def test_config(self): + self.config_tester.run_common_tests() + + def test_model_common_attributes(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer) + + if model_class in self.all_generative_model_classes: + x = model.get_output_embeddings() + assert isinstance(x, tf.keras.layers.Layer) + name = model.get_bias() + assert name is None + else: + x = model.get_output_embeddings() + assert x is None + name = model.get_bias() + assert name is None + + @slow + def test_batch_generation(self): + model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M") + tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M") + + tokenizer.padding_side = "left" + + # use different length sentences to test batching + sentences = [ + "Hello, my dog is a little", + "Today, I", + ] + + inputs = tokenizer(sentences, return_tensors="tf", padding=True) + + outputs = model.generate(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"]) + + inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids + output_non_padded = model.generate(input_ids=inputs_non_padded) + + num_paddings = ( + inputs_non_padded.shape[-1] + - tf.math.reduce_sum(tf.cast(inputs["attention_mask"][-1], dtype=tf.int64)).numpy() + ) + inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids + output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings) + + batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True) + non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True) + padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True) + + expected_output_sentence = [ + "Hello, my dog is a little bit of a shy one, but he is very friendly", + "Today, I am going to share with you a few of my favorite things", + ] + self.assertListEqual(expected_output_sentence, batch_out_sentence) + self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence]) + + @slow + def test_model_from_pretrained(self): + for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: + model = TFXGLMModel.from_pretrained(model_name) + self.assertIsNotNone(model) + + @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.") + def test_resize_token_embeddings(self): + super().test_resize_token_embeddings() + + +@require_tf +class TFXGLMModelLanguageGenerationTest(unittest.TestCase): + @slow + def test_lm_generate_xglm(self, verify_outputs=True): + model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M") + input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32) # The dog + # The dog is a very friendly dog. He is very affectionate and loves to play with other + # fmt: off + expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581] + # fmt: on + output_ids = model.generate(input_ids, do_sample=False, num_beams=1) + if verify_outputs: + self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids) + + @slow + def test_xglm_sample(self): + tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M") + model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M") + + tf.random.set_seed(0) + tokenized = tokenizer("Today is a nice day and", return_tensors="tf") + input_ids = tokenized.input_ids + output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0]) + output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True) + + EXPECTED_OUTPUT_STR = ( + "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due" + ) + self.assertEqual(output_str, EXPECTED_OUTPUT_STR) + + @slow + def test_lm_generate_xglm_left_padding(self): + """Tests that the generated text is the same, regarless of left padding""" + tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M") + model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M") + + tokenizer.padding_side = "left" + + generation_kwargs = { + "bad_words_ids": [tokenizer("is").input_ids, tokenizer("angry about").input_ids], + "no_repeat_ngram_size": 2, + "do_sample": False, + "repetition_penalty": 1.3, + } + expected_output_string = ( + "Today is a beautiful day and I am so glad that we have the opportunity to spend time with" + ) + + sentences = ["Today is a beautiful day and"] + input_ids = tokenizer(sentences, return_tensors="tf", padding=True) + # using default length + output_ids = model.generate(**input_ids, **generation_kwargs) + output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) + self.assertEqual(output_strings[0], expected_output_string) + + sentences = ["Today is a beautiful day and", "This is a very long input that we absolutely don't care about"] + input_ids = tokenizer(sentences, return_tensors="tf", padding=True) + # longer max length to capture the full length (remember: it is left padded) + output_ids = model.generate(**input_ids, **generation_kwargs, max_length=28) + output_strings = tokenizer.batch_decode(output_ids, skip_special_tokens=True) + self.assertEqual(output_strings[0], expected_output_string) From 5b249496699656616fbb44a5cd44c1750f33ea04 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 24 Aug 2022 06:12:48 -0400 Subject: [PATCH 137/539] Bump nbconvert in /examples/research_projects/visual_bert (#18741) Bumps [nbconvert](https://github.com/jupyter/nbconvert) from 6.3.0 to 6.5.1. - [Release notes](https://github.com/jupyter/nbconvert/releases) - [Commits](https://github.com/jupyter/nbconvert/compare/6.3.0...6.5.1) --- updated-dependencies: - dependency-name: nbconvert dependency-type: direct:production ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/research_projects/visual_bert/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/research_projects/visual_bert/requirements.txt b/examples/research_projects/visual_bert/requirements.txt index 9e7cc88ce0828f..a4f21439b59b56 100644 --- a/examples/research_projects/visual_bert/requirements.txt +++ b/examples/research_projects/visual_bert/requirements.txt @@ -43,7 +43,7 @@ matplotlib==3.3.1 mistune==2.0.3 msgpack==0.6.2 nbclient==0.5.0 -nbconvert==6.3.0 +nbconvert==6.5.1 nbformat==5.0.7 nest-asyncio==1.4.0 notebook==6.4.12 From e49c71fc4ce0a89e1e1b1a20ab42b6e3f0038e93 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 24 Aug 2022 06:12:56 -0400 Subject: [PATCH 138/539] Bump nbconvert from 6.3.0 to 6.5.1 in /examples/research_projects/lxmert (#18742) Bumps [nbconvert](https://github.com/jupyter/nbconvert) from 6.3.0 to 6.5.1. - [Release notes](https://github.com/jupyter/nbconvert/releases) - [Commits](https://github.com/jupyter/nbconvert/compare/6.3.0...6.5.1) --- updated-dependencies: - dependency-name: nbconvert dependency-type: direct:production ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/research_projects/lxmert/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/research_projects/lxmert/requirements.txt b/examples/research_projects/lxmert/requirements.txt index 9e7cc88ce0828f..a4f21439b59b56 100644 --- a/examples/research_projects/lxmert/requirements.txt +++ b/examples/research_projects/lxmert/requirements.txt @@ -43,7 +43,7 @@ matplotlib==3.3.1 mistune==2.0.3 msgpack==0.6.2 nbclient==0.5.0 -nbconvert==6.3.0 +nbconvert==6.5.1 nbformat==5.0.7 nest-asyncio==1.4.0 notebook==6.4.12 From dcff504e1806467965e2ac1f1e3864cddabaf31f Mon Sep 17 00:00:00 2001 From: Juyoung Kim <61103343+JadeKim042386@users.noreply.github.com> Date: Wed, 24 Aug 2022 19:20:27 +0900 Subject: [PATCH 139/539] fixed docstring typos (#18739) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fixed docstring typos * Added missing colon Co-authored-by: 김주영 --- src/transformers/models/bart/configuration_bart.py | 6 +++--- src/transformers/models/bert/tokenization_bert.py | 2 +- .../bigbird_pegasus/configuration_bigbird_pegasus.py | 4 ++-- .../models/blenderbot/configuration_blenderbot.py | 4 ++-- .../blenderbot_small/configuration_blenderbot_small.py | 4 ++-- src/transformers/models/detr/configuration_detr.py | 4 ++-- src/transformers/models/dpr/modeling_dpr.py | 2 +- src/transformers/models/dpr/modeling_tf_dpr.py | 2 +- .../models/encoder_decoder/modeling_encoder_decoder.py | 2 +- .../encoder_decoder/modeling_tf_encoder_decoder.py | 2 +- src/transformers/models/fsmt/configuration_fsmt.py | 4 ++-- .../models/layoutlmv2/tokenization_layoutlmv2.py | 2 +- .../models/layoutlmv2/tokenization_layoutlmv2_fast.py | 2 +- src/transformers/models/led/configuration_led.py | 4 ++-- src/transformers/models/m2m_100/configuration_m2m_100.py | 4 ++-- src/transformers/models/marian/configuration_marian.py | 4 ++-- src/transformers/models/mbart/configuration_mbart.py | 4 ++-- src/transformers/models/mpnet/tokenization_mpnet.py | 2 +- src/transformers/models/pegasus/configuration_pegasus.py | 4 ++-- src/transformers/models/plbart/configuration_plbart.py | 4 ++-- .../models/prophetnet/tokenization_prophetnet.py | 2 +- src/transformers/models/rag/configuration_rag.py | 2 +- src/transformers/models/realm/tokenization_realm.py | 2 +- .../models/roformer/tokenization_roformer.py | 2 +- .../modeling_speech_encoder_decoder.py | 2 +- .../speech_to_text/configuration_speech_to_text.py | 4 ++-- .../speech_to_text_2/configuration_speech_to_text_2.py | 9 +++++---- .../models/splinter/tokenization_splinter.py | 4 ++-- .../models/splinter/tokenization_splinter_fast.py | 4 ++-- src/transformers/models/t5/modeling_tf_t5.py | 6 +++--- src/transformers/models/tapas/tokenization_tapas.py | 4 ++-- src/transformers/models/trocr/configuration_trocr.py | 2 +- .../modeling_tf_vision_encoder_decoder.py | 2 +- .../modeling_vision_encoder_decoder.py | 2 +- src/transformers/models/xglm/configuration_xglm.py | 2 +- 35 files changed, 58 insertions(+), 57 deletions(-) diff --git a/src/transformers/models/bart/configuration_bart.py b/src/transformers/models/bart/configuration_bart.py index 0ece76cc64077b..b7262f13fe34b2 100644 --- a/src/transformers/models/bart/configuration_bart.py +++ b/src/transformers/models/bart/configuration_bart.py @@ -77,17 +77,17 @@ class BartConfig(PretrainedConfig): just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - encoder_layerdrop: (`float`, *optional*, defaults to 0.0): + encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. - decoder_layerdrop: (`float`, *optional*, defaults to 0.0): + decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. scale_embedding (`bool`, *optional*, defaults to `False`): Scale embeddings by diving by sqrt(d_model). use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). - num_labels: (`int`, *optional*, defaults to 3): + num_labels (`int`, *optional*, defaults to 3): The number of labels to use in [`BartForSequenceClassification`]. forced_eos_token_id (`int`, *optional*, defaults to 2): The id of the token to force as the last generated token when `max_length` is reached. Usually set to diff --git a/src/transformers/models/bert/tokenization_bert.py b/src/transformers/models/bert/tokenization_bert.py index 233ef0ab0d5171..d398fc5154aec3 100644 --- a/src/transformers/models/bert/tokenization_bert.py +++ b/src/transformers/models/bert/tokenization_bert.py @@ -383,7 +383,7 @@ class BasicTokenizer(object): This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). - strip_accents: (`bool`, *optional*): + strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). """ diff --git a/src/transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py b/src/transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py index dd9fb89d55eced..f0c874f69405de 100644 --- a/src/transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py +++ b/src/transformers/models/bigbird_pegasus/configuration_bigbird_pegasus.py @@ -85,10 +85,10 @@ class BigBirdPegasusConfig(PretrainedConfig): just in case (e.g., 1024 or 2048 or 4096). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - encoder_layerdrop: (`float`, *optional*, defaults to 0.0): + encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. - decoder_layerdrop: (`float`, *optional*, defaults to 0.0): + decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. use_cache (`bool`, *optional*, defaults to `True`): diff --git a/src/transformers/models/blenderbot/configuration_blenderbot.py b/src/transformers/models/blenderbot/configuration_blenderbot.py index b8b8401291a970..3e0c5c7d14cf3b 100644 --- a/src/transformers/models/blenderbot/configuration_blenderbot.py +++ b/src/transformers/models/blenderbot/configuration_blenderbot.py @@ -78,10 +78,10 @@ class BlenderbotConfig(PretrainedConfig): just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - encoder_layerdrop: (`float`, *optional*, defaults to 0.0): + encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. - decoder_layerdrop: (`float`, *optional*, defaults to 0.0): + decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. scale_embedding (`bool`, *optional*, defaults to `False`): diff --git a/src/transformers/models/blenderbot_small/configuration_blenderbot_small.py b/src/transformers/models/blenderbot_small/configuration_blenderbot_small.py index bf3796e72920aa..abeca877c64cf1 100644 --- a/src/transformers/models/blenderbot_small/configuration_blenderbot_small.py +++ b/src/transformers/models/blenderbot_small/configuration_blenderbot_small.py @@ -78,10 +78,10 @@ class BlenderbotSmallConfig(PretrainedConfig): just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - encoder_layerdrop: (`float`, *optional*, defaults to 0.0): + encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. - decoder_layerdrop: (`float`, *optional*, defaults to 0.0): + decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. scale_embedding (`bool`, *optional*, defaults to `False`): diff --git a/src/transformers/models/detr/configuration_detr.py b/src/transformers/models/detr/configuration_detr.py index e46a5d610e8a8e..fb320afdd2014e 100644 --- a/src/transformers/models/detr/configuration_detr.py +++ b/src/transformers/models/detr/configuration_detr.py @@ -74,10 +74,10 @@ class DetrConfig(PretrainedConfig): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. init_xavier_std (`float`, *optional*, defaults to 1): The scaling factor used for the Xavier initialization gain in the HM Attention map module. - encoder_layerdrop: (`float`, *optional*, defaults to 0.0): + encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. - decoder_layerdrop: (`float`, *optional*, defaults to 0.0): + decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. auxiliary_loss (`bool`, *optional*, defaults to `False`): diff --git a/src/transformers/models/dpr/modeling_dpr.py b/src/transformers/models/dpr/modeling_dpr.py index 20174afd2ad82e..a14e944155db64 100644 --- a/src/transformers/models/dpr/modeling_dpr.py +++ b/src/transformers/models/dpr/modeling_dpr.py @@ -404,7 +404,7 @@ class DPRPretrainedReader(DPRPreTrainedModel): DPR_READER_INPUTS_DOCSTRING = r""" Args: - input_ids: (`Tuple[torch.LongTensor]` of shapes `(n_passages, sequence_length)`): + input_ids (`Tuple[torch.LongTensor]` of shapes `(n_passages, sequence_length)`): Indices of input sequence tokens in the vocabulary. It has to be a sequence triplet with 1) the question and 2) the passages titles and 3) the passages texts To match pretraining, DPR `input_ids` sequence should be formatted with [CLS] and [SEP] with the format: diff --git a/src/transformers/models/dpr/modeling_tf_dpr.py b/src/transformers/models/dpr/modeling_tf_dpr.py index 0efbc821bc2bc7..7955e067254cf6 100644 --- a/src/transformers/models/dpr/modeling_tf_dpr.py +++ b/src/transformers/models/dpr/modeling_tf_dpr.py @@ -493,7 +493,7 @@ def serving(self, inputs): TF_DPR_READER_INPUTS_DOCSTRING = r""" Args: - input_ids: (`Numpy array` or `tf.Tensor` of shapes `(n_passages, sequence_length)`): + input_ids (`Numpy array` or `tf.Tensor` of shapes `(n_passages, sequence_length)`): Indices of input sequence tokens in the vocabulary. It has to be a sequence triplet with 1) the question and 2) the passages titles and 3) the passages texts To match pretraining, DPR `input_ids` sequence should be formatted with [CLS] and [SEP] with the format: diff --git a/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py b/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py index a7ff6a7e3aa95f..c4575819ee1f2f 100644 --- a/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py +++ b/src/transformers/models/encoder_decoder/modeling_encoder_decoder.py @@ -136,7 +136,7 @@ more detail. return_dict (`bool`, *optional*): If set to `True`, the model will return a [`~utils.Seq2SeqLMOutput`] instead of a plain tuple. - kwargs: (*optional*) Remaining dictionary of keyword arguments. Keyword arguments come in two flavors: + kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments come in two flavors: - Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function. - With a *decoder_* prefix which will be input as `**decoder_kwargs` for the decoder forward function. diff --git a/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py b/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py index 714e2c231d1bd0..c74865617cf76d 100644 --- a/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py +++ b/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py @@ -147,7 +147,7 @@ training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). - kwargs: (*optional*) Remaining dictionary of keyword arguments. Keyword arguments come in two flavors: + kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments come in two flavors: - Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function. - With a *decoder_* prefix which will be input as `**decoder_kwargs`` for the decoder forward function. diff --git a/src/transformers/models/fsmt/configuration_fsmt.py b/src/transformers/models/fsmt/configuration_fsmt.py index 62237cf8685174..14298d6a1cc029 100644 --- a/src/transformers/models/fsmt/configuration_fsmt.py +++ b/src/transformers/models/fsmt/configuration_fsmt.py @@ -95,9 +95,9 @@ class FSMTConfig(PretrainedConfig): End of stream token id. decoder_start_token_id (`int`, *optional*): This model starts decoding with `eos_token_id` - encoder_layerdrop: (`float`, *optional*, defaults to 0.0): + encoder_layerdrop (`float`, *optional*, defaults to 0.0): Google "layerdrop arxiv", as its not explainable in one line. - decoder_layerdrop: (`float`, *optional*, defaults to 0.0): + decoder_layerdrop (`float`, *optional*, defaults to 0.0): Google "layerdrop arxiv", as its not explainable in one line. is_encoder_decoder (`bool`, *optional*, defaults to `True`): Whether this is an encoder/decoder model. diff --git a/src/transformers/models/layoutlmv2/tokenization_layoutlmv2.py b/src/transformers/models/layoutlmv2/tokenization_layoutlmv2.py index db934e5e87259c..5944c98f075358 100644 --- a/src/transformers/models/layoutlmv2/tokenization_layoutlmv2.py +++ b/src/transformers/models/layoutlmv2/tokenization_layoutlmv2.py @@ -1362,7 +1362,7 @@ class BasicTokenizer(object): This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). - strip_accents: (`bool`, *optional*): + strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). """ diff --git a/src/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py b/src/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py index b61cf5ef7633ad..e1d608c339fa65 100644 --- a/src/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py +++ b/src/transformers/models/layoutlmv2/tokenization_layoutlmv2_fast.py @@ -108,7 +108,7 @@ class LayoutLMv2TokenizerFast(PreTrainedTokenizerFast): tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this issue](https://github.com/huggingface/transformers/issues/328)). - strip_accents: (`bool`, *optional*): + strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original LayoutLMv2). """ diff --git a/src/transformers/models/led/configuration_led.py b/src/transformers/models/led/configuration_led.py index 37720c730af1e2..98d2e32f62e4e8 100644 --- a/src/transformers/models/led/configuration_led.py +++ b/src/transformers/models/led/configuration_led.py @@ -74,10 +74,10 @@ class LEDConfig(PretrainedConfig): The maximum sequence length that the decoder might ever be used with. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - encoder_layerdrop: (`float`, *optional*, defaults to 0.0): + encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. - decoder_layerdrop: (`float`, *optional*, defaults to 0.0): + decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. use_cache (`bool`, *optional*, defaults to `True`): diff --git a/src/transformers/models/m2m_100/configuration_m2m_100.py b/src/transformers/models/m2m_100/configuration_m2m_100.py index 39b810bb3871f7..0ab2365accd34a 100644 --- a/src/transformers/models/m2m_100/configuration_m2m_100.py +++ b/src/transformers/models/m2m_100/configuration_m2m_100.py @@ -76,10 +76,10 @@ class M2M100Config(PretrainedConfig): just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - encoder_layerdrop: (`float`, *optional*, defaults to 0.0): + encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. - decoder_layerdrop: (`float`, *optional*, defaults to 0.0): + decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. use_cache (`bool`, *optional*, defaults to `True`): diff --git a/src/transformers/models/marian/configuration_marian.py b/src/transformers/models/marian/configuration_marian.py index f662d388448bb4..e2db7b526561d5 100644 --- a/src/transformers/models/marian/configuration_marian.py +++ b/src/transformers/models/marian/configuration_marian.py @@ -76,10 +76,10 @@ class MarianConfig(PretrainedConfig): just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - encoder_layerdrop: (`float`, *optional*, defaults to 0.0): + encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. - decoder_layerdrop: (`float`, *optional*, defaults to 0.0): + decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. scale_embedding (`bool`, *optional*, defaults to `False`): diff --git a/src/transformers/models/mbart/configuration_mbart.py b/src/transformers/models/mbart/configuration_mbart.py index af67cf858db177..8618ec5ebbe15a 100644 --- a/src/transformers/models/mbart/configuration_mbart.py +++ b/src/transformers/models/mbart/configuration_mbart.py @@ -76,10 +76,10 @@ class MBartConfig(PretrainedConfig): just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - encoder_layerdrop: (`float`, *optional*, defaults to 0.0): + encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. - decoder_layerdrop: (`float`, *optional*, defaults to 0.0): + decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. scale_embedding (`bool`, *optional*, defaults to `False`): diff --git a/src/transformers/models/mpnet/tokenization_mpnet.py b/src/transformers/models/mpnet/tokenization_mpnet.py index 713a528d557a04..28d8b7096ae118 100644 --- a/src/transformers/models/mpnet/tokenization_mpnet.py +++ b/src/transformers/models/mpnet/tokenization_mpnet.py @@ -340,7 +340,7 @@ class BasicTokenizer(object): This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). - strip_accents: (`bool`, *optional*): + strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). """ diff --git a/src/transformers/models/pegasus/configuration_pegasus.py b/src/transformers/models/pegasus/configuration_pegasus.py index 91ce7c35ae555b..ae5f8f007573b7 100644 --- a/src/transformers/models/pegasus/configuration_pegasus.py +++ b/src/transformers/models/pegasus/configuration_pegasus.py @@ -71,10 +71,10 @@ class PegasusConfig(PretrainedConfig): just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - encoder_layerdrop: (`float`, *optional*, defaults to 0.0): + encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. - decoder_layerdrop: (`float`, *optional*, defaults to 0.0): + decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. scale_embedding (`bool`, *optional*, defaults to `False`): diff --git a/src/transformers/models/plbart/configuration_plbart.py b/src/transformers/models/plbart/configuration_plbart.py index 75bdd1f5dea5ab..94a96a675d1bd6 100644 --- a/src/transformers/models/plbart/configuration_plbart.py +++ b/src/transformers/models/plbart/configuration_plbart.py @@ -74,10 +74,10 @@ class PLBartConfig(PretrainedConfig): just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - encoder_layerdrop: (`float`, *optional*, defaults to 0.0): + encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. - decoder_layerdrop: (`float`, *optional*, defaults to 0.0): + decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. scale_embedding (`bool`, *optional*, defaults to `True`): diff --git a/src/transformers/models/prophetnet/tokenization_prophetnet.py b/src/transformers/models/prophetnet/tokenization_prophetnet.py index c7725974039043..f18bf9d3360386 100644 --- a/src/transformers/models/prophetnet/tokenization_prophetnet.py +++ b/src/transformers/models/prophetnet/tokenization_prophetnet.py @@ -94,7 +94,7 @@ class ProphetNetTokenizer(PreTrainedTokenizer): This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). - strip_accents: (`bool`, *optional*): + strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). """ diff --git a/src/transformers/models/rag/configuration_rag.py b/src/transformers/models/rag/configuration_rag.py index 6046b934cd6499..109588eadbdfaa 100644 --- a/src/transformers/models/rag/configuration_rag.py +++ b/src/transformers/models/rag/configuration_rag.py @@ -49,7 +49,7 @@ `"compressed"`. index_path (`str`, *optional*) The path to the serialized faiss index on disk. - passages_path: (`str`, *optional*): + passages_path (`str`, *optional*): A path to text passages compatible with the faiss index. Required if using [`~models.rag.retrieval_rag.LegacyIndex`] use_dummy_dataset (`bool`, *optional*, defaults to `False`) diff --git a/src/transformers/models/realm/tokenization_realm.py b/src/transformers/models/realm/tokenization_realm.py index 63295826d462b6..28ee33ab87bc07 100644 --- a/src/transformers/models/realm/tokenization_realm.py +++ b/src/transformers/models/realm/tokenization_realm.py @@ -132,7 +132,7 @@ class RealmTokenizer(PreTrainedTokenizer): This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). - strip_accents: (`bool`, *optional*): + strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). """ diff --git a/src/transformers/models/roformer/tokenization_roformer.py b/src/transformers/models/roformer/tokenization_roformer.py index ac1efc72d08978..10f69dbdb91f12 100644 --- a/src/transformers/models/roformer/tokenization_roformer.py +++ b/src/transformers/models/roformer/tokenization_roformer.py @@ -103,7 +103,7 @@ class RoFormerTokenizer(PreTrainedTokenizer): This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). - strip_accents: (`bool`, *optional*): + strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). diff --git a/src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py b/src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py index 388be244994766..174210887066e1 100644 --- a/src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py +++ b/src/transformers/models/speech_encoder_decoder/modeling_speech_encoder_decoder.py @@ -143,7 +143,7 @@ into a tensor of type `torch.FloatTensor`. See [`~Speech2TextFeatureExtractor.__call__`] return_dict (`bool`, *optional*): If set to `True`, the model will return a [`~utils.Seq2SeqLMOutput`] instead of a plain tuple. - kwargs: (*optional*) Remaining dictionary of keyword arguments. Keyword arguments come in two flavors: + kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments come in two flavors: - Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function. - With a *decoder_* prefix which will be input as `**decoder_kwargs` for the decoder forward function. diff --git a/src/transformers/models/speech_to_text/configuration_speech_to_text.py b/src/transformers/models/speech_to_text/configuration_speech_to_text.py index f12be50b538cef..c5f47a6f892bb5 100644 --- a/src/transformers/models/speech_to_text/configuration_speech_to_text.py +++ b/src/transformers/models/speech_to_text/configuration_speech_to_text.py @@ -70,10 +70,10 @@ class Speech2TextConfig(PretrainedConfig): The dropout ratio for classifier. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - encoder_layerdrop: (`float`, *optional*, defaults to 0.0): + encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. - decoder_layerdrop: (`float`, *optional*, defaults to 0.0): + decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. use_cache (`bool`, *optional*, defaults to `True`): diff --git a/src/transformers/models/speech_to_text_2/configuration_speech_to_text_2.py b/src/transformers/models/speech_to_text_2/configuration_speech_to_text_2.py index c1b3cf7e4c7fb7..b7f1bc34a280cf 100644 --- a/src/transformers/models/speech_to_text_2/configuration_speech_to_text_2.py +++ b/src/transformers/models/speech_to_text_2/configuration_speech_to_text_2.py @@ -64,14 +64,15 @@ class Speech2Text2Config(PretrainedConfig): The dropout ratio for classifier. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - https://arxiv.org/abs/1909.11556>`__ for more details. decoder_layerdrop: (`float`, *optional*, defaults to - 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see - https://arxiv.org/abs/1909.11556) for more details. + https://arxiv.org/abs/1909.11556>`__ for more details. + decoder_layerdrop (`float`, *optional*, defaults to 0.0): + The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) + for more details. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). max_source_positions (`int`, *optional*, defaults to 6000): The maximum sequence length of log-mel filter-bank features that this model might ever be used with. - max_target_positions: (`int`, *optional*, defaults to 1024): + max_target_positions (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). diff --git a/src/transformers/models/splinter/tokenization_splinter.py b/src/transformers/models/splinter/tokenization_splinter.py index f600566e6e9411..40daeb09465ad1 100644 --- a/src/transformers/models/splinter/tokenization_splinter.py +++ b/src/transformers/models/splinter/tokenization_splinter.py @@ -111,7 +111,7 @@ class SplinterTokenizer(PreTrainedTokenizer): This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). - strip_accents: (`bool`, *optional*): + strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). """ @@ -340,7 +340,7 @@ class BasicTokenizer(object): This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). - strip_accents: (`bool`, *optional*): + strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). """ diff --git a/src/transformers/models/splinter/tokenization_splinter_fast.py b/src/transformers/models/splinter/tokenization_splinter_fast.py index 103ead72ae0f9d..6eb69755905ace 100644 --- a/src/transformers/models/splinter/tokenization_splinter_fast.py +++ b/src/transformers/models/splinter/tokenization_splinter_fast.py @@ -87,10 +87,10 @@ class SplinterTokenizerFast(PreTrainedTokenizerFast): tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this issue](https://github.com/huggingface/transformers/issues/328)). - strip_accents: (`bool`, *optional*): + strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). - wordpieces_prefix: (`str`, *optional*, defaults to `"##"`): + wordpieces_prefix (`str`, *optional*, defaults to `"##"`): The prefix for subwords. """ diff --git a/src/transformers/models/t5/modeling_tf_t5.py b/src/transformers/models/t5/modeling_tf_t5.py index 2eebdfd1cb60e6..091cb9d63eb42d 100644 --- a/src/transformers/models/t5/modeling_tf_t5.py +++ b/src/transformers/models/t5/modeling_tf_t5.py @@ -1008,14 +1008,14 @@ def _shift_right(self, input_ids): decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. - head_mask: (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the encoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. - decoder_head_mask: (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + decoder_head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules in the decoder. Mask values selected in `[0, 1]`: @@ -1084,7 +1084,7 @@ def _shift_right(self, input_ids): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. - head_mask: (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, diff --git a/src/transformers/models/tapas/tokenization_tapas.py b/src/transformers/models/tapas/tokenization_tapas.py index ddb855642f4338..5c8c9d4f6e2572 100644 --- a/src/transformers/models/tapas/tokenization_tapas.py +++ b/src/transformers/models/tapas/tokenization_tapas.py @@ -293,7 +293,7 @@ class TapasTokenizer(PreTrainedTokenizer): tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). - strip_accents: (`bool`, *optional*): + strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). cell_trim_length (`int`, *optional*, defaults to -1): @@ -2053,7 +2053,7 @@ class BasicTokenizer(object): This should likely be deactivated for Japanese (see this [issue](https://github.com/huggingface/transformers/issues/328)). - strip_accents: (`bool`, *optional*): + strip_accents (`bool`, *optional*): Whether or not to strip all accents. If this option is not specified, then it will be determined by the value for `lowercase` (as in the original BERT). """ diff --git a/src/transformers/models/trocr/configuration_trocr.py b/src/transformers/models/trocr/configuration_trocr.py index a635e6b9b09729..0f8729df6917bb 100644 --- a/src/transformers/models/trocr/configuration_trocr.py +++ b/src/transformers/models/trocr/configuration_trocr.py @@ -67,7 +67,7 @@ class TrOCRConfig(PretrainedConfig): The dropout ratio for classifier. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. - decoder_layerdrop: (`float`, *optional*, defaults to 0.0): + decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. use_cache (`bool`, *optional*, defaults to `True`): diff --git a/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py b/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py index 682faa3825c592..45924e2666c90e 100644 --- a/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py +++ b/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py @@ -136,7 +136,7 @@ training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). - kwargs: (*optional*) Remaining dictionary of keyword arguments. Keyword arguments come in two flavors: + kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments come in two flavors: - Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function. - With a *decoder_* prefix which will be input as `**decoder_kwargs` for the decoder forward function. diff --git a/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py b/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py index d2c4ae6b18cf32..4906c13a9c0276 100644 --- a/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py +++ b/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py @@ -137,7 +137,7 @@ def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start more detail. return_dict (`bool`, *optional*): If set to `True`, the model will return a [`~utils.Seq2SeqLMOutput`] instead of a plain tuple. - kwargs: (*optional*) Remaining dictionary of keyword arguments. Keyword arguments come in two flavors: + kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments come in two flavors: - Without a prefix which will be input as `**encoder_kwargs` for the encoder forward function. - With a *decoder_* prefix which will be input as `**decoder_kwargs` for the decoder forward function. diff --git a/src/transformers/models/xglm/configuration_xglm.py b/src/transformers/models/xglm/configuration_xglm.py index f7a1d47abc2780..c9ac1111a08d78 100644 --- a/src/transformers/models/xglm/configuration_xglm.py +++ b/src/transformers/models/xglm/configuration_xglm.py @@ -61,7 +61,7 @@ class XGLMConfig(PretrainedConfig): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. - layerdrop: (`float`, *optional*, defaults to 0.0): + layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. init_std (`float`, *optional*, defaults to 0.02): From 6667b0d7bf0a135b9a85d2d21b7eb25ec8ad58cb Mon Sep 17 00:00:00 2001 From: SaulLu <55560583+SaulLu@users.noreply.github.com> Date: Wed, 24 Aug 2022 12:27:56 +0200 Subject: [PATCH 140/539] add warning to let the user know that the `__call__` method is faster than `encode` + `pad` for a fast tokenizer (#18693) * add warning to let the user know that the method is slower that for a fast tokenizer * user warnings * fix layoutlmv2 * fix layout* * change warnings into logger.warning --- src/transformers/tokenization_utils_base.py | 14 ++++- .../test_tokenization_layoutlmv2.py | 55 ++++++++++++++++++- .../test_tokenization_layoutlmv3.py | 55 ++++++++++++++++++- .../layoutxlm/test_tokenization_layoutxlm.py | 53 +++++++++++++++++- tests/test_tokenization_common.py | 44 +++++++++++++++ 5 files changed, 217 insertions(+), 4 deletions(-) diff --git a/src/transformers/tokenization_utils_base.py b/src/transformers/tokenization_utils_base.py index fd8e1ee58551e4..0b01163c0b15e6 100644 --- a/src/transformers/tokenization_utils_base.py +++ b/src/transformers/tokenization_utils_base.py @@ -2821,7 +2821,10 @@ def pad( in the batch. Padding side (left/right) padding token ids are defined at the tokenizer level (with `self.padding_side`, - `self.pad_token_id` and `self.pad_token_type_id`) + `self.pad_token_id` and `self.pad_token_type_id`). + + Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the + text followed by a call to the `pad` method to get a padded encoding. @@ -2871,6 +2874,15 @@ def pad( verbose (`bool`, *optional*, defaults to `True`): Whether or not to print more information and warnings. """ + if self.__class__.__name__.endswith("Fast"): + if not self.deprecation_warnings.get("Asking-to-pad-a-fast-tokenizer", False): + logger.warning_advice( + f"You're using a {self.__class__.__name__} tokenizer. Please note that with a fast tokenizer," + " using the `__call__` method is faster than using a method to encode the text followed by a call" + " to the `pad` method to get a padded encoding." + ) + self.deprecation_warnings["Asking-to-pad-a-fast-tokenizer"] = True + # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], Mapping): diff --git a/tests/models/layoutlmv2/test_tokenization_layoutlmv2.py b/tests/models/layoutlmv2/test_tokenization_layoutlmv2.py index 049caae6419418..0aadd099f21051 100644 --- a/tests/models/layoutlmv2/test_tokenization_layoutlmv2.py +++ b/tests/models/layoutlmv2/test_tokenization_layoutlmv2.py @@ -21,7 +21,14 @@ import unittest from typing import List -from transformers import AddedToken, LayoutLMv2TokenizerFast, SpecialTokensMixin, is_tf_available, is_torch_available +from transformers import ( + AddedToken, + LayoutLMv2TokenizerFast, + SpecialTokensMixin, + is_tf_available, + is_torch_available, + logging, +) from transformers.models.layoutlmv2.tokenization_layoutlmv2 import ( VOCAB_FILES_NAMES, BasicTokenizer, @@ -41,6 +48,9 @@ ) +logger = logging.get_logger(__name__) + + @require_tokenizers @require_pandas class LayoutLMv2TokenizationTest(TokenizerTesterMixin, unittest.TestCase): @@ -788,6 +798,49 @@ def test_padding(self, max_length=50): self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) + def test_padding_warning_message_fast_tokenizer(self): + if not self.test_rust_tokenizer: + return + + words, boxes = self.get_words_and_boxes_batch() + + tokenizer_fast = self.get_rust_tokenizer() + + encoding_fast = tokenizer_fast( + words, + boxes=boxes, + ) + + with self.assertLogs("transformers", level="WARNING") as cm: + tokenizer_fast.pad(encoding_fast) + self.assertEqual(len(cm.records), 1) + self.assertIn( + "Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to" + " encode the text followed by a call to the `pad` method to get a padded encoding.", + cm.records[0].message, + ) + + if not self.test_slow_tokenizer: + return + + tokenizer_slow = self.get_tokenizer() + + encoding_slow = tokenizer_slow( + words, + boxes=boxes, + ) + + with self.assertLogs(level="WARNING") as cm: + # We want to assert there are no warnings, but the 'assertLogs' method does not support that. + # Therefore, we are adding a dummy warning, and then we will assert it is the only warning. + logger.warning("Dummy warning") + tokenizer_slow.pad(encoding_slow) + self.assertEqual(len(cm.records), 1) + self.assertIn( + "Dummy warning", + cm.records[0].message, + ) + def test_call(self): # Tests that all call wrap to encode_plus and batch_encode_plus tokenizers = self.get_tokenizers(do_lower_case=False) diff --git a/tests/models/layoutlmv3/test_tokenization_layoutlmv3.py b/tests/models/layoutlmv3/test_tokenization_layoutlmv3.py index 239939ca26964e..419b95feee23a6 100644 --- a/tests/models/layoutlmv3/test_tokenization_layoutlmv3.py +++ b/tests/models/layoutlmv3/test_tokenization_layoutlmv3.py @@ -22,13 +22,23 @@ import unittest from typing import List -from transformers import AddedToken, LayoutLMv3TokenizerFast, SpecialTokensMixin, is_tf_available, is_torch_available +from transformers import ( + AddedToken, + LayoutLMv3TokenizerFast, + SpecialTokensMixin, + is_tf_available, + is_torch_available, + logging, +) from transformers.models.layoutlmv3.tokenization_layoutlmv3 import VOCAB_FILES_NAMES, LayoutLMv3Tokenizer from transformers.testing_utils import is_pt_tf_cross_test, require_pandas, require_tokenizers, require_torch, slow from ...test_tokenization_common import SMALL_TRAINING_CORPUS, TokenizerTesterMixin, merge_model_tokenizer_mappings +logger = logging.get_logger(__name__) + + @require_tokenizers @require_pandas class LayoutLMv3TokenizationTest(TokenizerTesterMixin, unittest.TestCase): @@ -668,6 +678,49 @@ def test_padding(self, max_length=50): self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) + def test_padding_warning_message_fast_tokenizer(self): + if not self.test_rust_tokenizer: + return + + words, boxes = self.get_words_and_boxes_batch() + + tokenizer_fast = self.get_rust_tokenizer() + + encoding_fast = tokenizer_fast( + words, + boxes=boxes, + ) + + with self.assertLogs("transformers", level="WARNING") as cm: + tokenizer_fast.pad(encoding_fast) + self.assertEqual(len(cm.records), 1) + self.assertIn( + "Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to" + " encode the text followed by a call to the `pad` method to get a padded encoding.", + cm.records[0].message, + ) + + if not self.test_slow_tokenizer: + return + + tokenizer_slow = self.get_tokenizer() + + encoding_slow = tokenizer_slow( + words, + boxes=boxes, + ) + + with self.assertLogs(level="WARNING") as cm: + # We want to assert there are no warnings, but the 'assertLogs' method does not support that. + # Therefore, we are adding a dummy warning, and then we will assert it is the only warning. + logger.warning("Dummy warning") + tokenizer_slow.pad(encoding_slow) + self.assertEqual(len(cm.records), 1) + self.assertIn( + "Dummy warning", + cm.records[0].message, + ) + def test_call(self): # Tests that all call wrap to encode_plus and batch_encode_plus tokenizers = self.get_tokenizers(do_lower_case=False) diff --git a/tests/models/layoutxlm/test_tokenization_layoutxlm.py b/tests/models/layoutxlm/test_tokenization_layoutxlm.py index 68aba50ecaf403..545e896e9ed689 100644 --- a/tests/models/layoutxlm/test_tokenization_layoutxlm.py +++ b/tests/models/layoutxlm/test_tokenization_layoutxlm.py @@ -19,7 +19,14 @@ import unittest from typing import List -from transformers import AddedToken, LayoutXLMTokenizerFast, SpecialTokensMixin, is_tf_available, is_torch_available +from transformers import ( + AddedToken, + LayoutXLMTokenizerFast, + SpecialTokensMixin, + is_tf_available, + is_torch_available, + logging, +) from transformers.models.layoutxlm.tokenization_layoutxlm import LayoutXLMTokenizer from transformers.testing_utils import ( get_tests_dir, @@ -40,6 +47,7 @@ ) +logger = logging.get_logger(__name__) SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") @@ -697,6 +705,49 @@ def test_padding(self, max_length=50): self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) + def test_padding_warning_message_fast_tokenizer(self): + if not self.test_rust_tokenizer: + return + + words, boxes = self.get_words_and_boxes_batch() + + tokenizer_fast = self.get_rust_tokenizer() + + encoding_fast = tokenizer_fast( + words, + boxes=boxes, + ) + + with self.assertLogs("transformers", level="WARNING") as cm: + tokenizer_fast.pad(encoding_fast) + self.assertEqual(len(cm.records), 1) + self.assertIn( + "Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to" + " encode the text followed by a call to the `pad` method to get a padded encoding.", + cm.records[0].message, + ) + + if not self.test_slow_tokenizer: + return + + tokenizer_slow = self.get_tokenizer() + + encoding_slow = tokenizer_slow( + words, + boxes=boxes, + ) + + with self.assertLogs(level="WARNING") as cm: + # We want to assert there are no warnings, but the 'assertLogs' method does not support that. + # Therefore, we are adding a dummy warning, and then we will assert it is the only warning. + logger.warning("Dummy warning") + tokenizer_slow.pad(encoding_slow) + self.assertEqual(len(cm.records), 1) + self.assertIn( + "Dummy warning", + cm.records[0].message, + ) + def test_call(self): # Tests that all call wrap to encode_plus and batch_encode_plus tokenizers = self.get_tokenizers(do_lower_case=False) diff --git a/tests/test_tokenization_common.py b/tests/test_tokenization_common.py index 5941a571189960..e1522ba3c66051 100644 --- a/tests/test_tokenization_common.py +++ b/tests/test_tokenization_common.py @@ -48,6 +48,7 @@ is_tf_available, is_tokenizers_available, is_torch_available, + logging, ) from transformers.testing_utils import ( TOKEN, @@ -81,6 +82,8 @@ from test_module.custom_tokenization_fast import CustomTokenizerFast +logger = logging.get_logger(__name__) + NON_ENGLISH_TAGS = ["chinese", "dutch", "french", "finnish", "german", "multilingual"] SMALL_TRAINING_CORPUS = [ @@ -1834,6 +1837,47 @@ def test_encode_plus_with_padding(self): self.assertEqual(attention_mask + [0] * padding_size, right_padded_attention_mask) self.assertEqual([0] * padding_size + attention_mask, left_padded_attention_mask) + def test_padding_warning_message_fast_tokenizer(self): + if not self.test_rust_tokenizer: + return + + sequence = "This is a text" + + tokenizer_fast = self.get_rust_tokenizer() + # check correct behaviour if no pad_token_id exists and add it eventually + self._check_no_pad_token_padding(tokenizer_fast, sequence) + + encoding_fast = tokenizer_fast(sequence) + + with self.assertLogs("transformers", level="WARNING") as cm: + tokenizer_fast.pad(encoding_fast) + self.assertEqual(len(cm.records), 1) + self.assertIn( + "Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to" + " encode the text followed by a call to the `pad` method to get a padded encoding.", + cm.records[0].message, + ) + + if not self.test_slow_tokenizer: + return + + tokenizer_slow = self.get_tokenizer() + # check correct behaviour if no pad_token_id exists and add it eventually + self._check_no_pad_token_padding(tokenizer_slow, sequence) + + encoding_slow = tokenizer_slow(sequence) + + with self.assertLogs(level="WARNING") as cm: + # We want to assert there are no warnings, but the 'assertLogs' method does not support that. + # Therefore, we are adding a dummy warning, and then we will assert it is the only warning. + logger.warning("Dummy warning") + tokenizer_slow.pad(encoding_slow) + self.assertEqual(len(cm.records), 1) + self.assertIn( + "Dummy warning", + cm.records[0].message, + ) + def test_separate_tokenizers(self): # This tests that tokenizers don't impact others. Unfortunately the case where it fails is when # we're loading an S3 configuration from a pre-trained identifier, and we have no way of testing those today. From c55d6e4e10ce2d9c37e5f677f0842b04ef8b73f3 Mon Sep 17 00:00:00 2001 From: Rahul A R Date: Wed, 24 Aug 2022 12:12:42 -0400 Subject: [PATCH 141/539] examples/run_summarization_no_trainer: fixed incorrect param to hasattr (#18720) * fixed incorrect param to hasattr * simplified condition checks * code cleanup --- .../summarization/run_summarization_no_trainer.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/examples/pytorch/summarization/run_summarization_no_trainer.py b/examples/pytorch/summarization/run_summarization_no_trainer.py index b75b4bf7d4c13a..89365b4de4e98c 100644 --- a/examples/pytorch/summarization/run_summarization_no_trainer.py +++ b/examples/pytorch/summarization/run_summarization_no_trainer.py @@ -573,12 +573,9 @@ def postprocess_text(preds, labels): args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Figure out how many steps we should save the Accelerator states - if hasattr(args.checkpointing_steps, "isdigit"): - checkpointing_steps = args.checkpointing_steps - if args.checkpointing_steps.isdigit(): - checkpointing_steps = int(args.checkpointing_steps) - else: - checkpointing_steps = None + checkpointing_steps = args.checkpointing_steps + if checkpointing_steps is not None and checkpointing_steps.isdigit(): + checkpointing_steps = int(checkpointing_steps) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. From 3223d49354e41dfa44649a9829c7b09013ad096e Mon Sep 17 00:00:00 2001 From: Patrick Deutschmann Date: Thu, 25 Aug 2022 08:34:42 +0200 Subject: [PATCH 142/539] Add ONNX support for Longformer (#17176) * Implement ONNX support for Longformer Fix repo consistency check complaints Fix value mismatches Add pooler output for default model Increase validation atol to accommodate multiple-choice error Fix copies Fix chunking for longer sequence lengths Add future comment * Fix issue in mask_invalid_locations * Remove torch imports in configuration_longformer * Change config access to fix LED * Push opset version to support tril * Work in review comments (mostly style) * Add Longformer to ONNX tests --- docs/source/en/serialization.mdx | 1 + src/transformers/models/led/modeling_led.py | 87 +++++++++++++----- .../longformer/configuration_longformer.py | 77 +++++++++++++++- .../models/longformer/modeling_longformer.py | 89 +++++++++++++------ src/transformers/onnx/features.py | 9 ++ tests/onnx/test_onnx_v2.py | 1 + 6 files changed, 212 insertions(+), 52 deletions(-) diff --git a/docs/source/en/serialization.mdx b/docs/source/en/serialization.mdx index 0aacdf76f7ef0f..89b73df4f5dfa6 100644 --- a/docs/source/en/serialization.mdx +++ b/docs/source/en/serialization.mdx @@ -74,6 +74,7 @@ Ready-made configurations include the following architectures: - LayoutLM - LayoutLMv3 - LeViT +- Longformer - LongT5 - M2M100 - Marian diff --git a/src/transformers/models/led/modeling_led.py b/src/transformers/models/led/modeling_led.py index 0837ac2bc423fc..ff79c0cad45b08 100755 --- a/src/transformers/models/led/modeling_led.py +++ b/src/transformers/models/led/modeling_led.py @@ -160,6 +160,8 @@ def __init__(self, config, layer_id): self.one_sided_attn_window_size = attention_window // 2 + self.config = config + def forward( self, hidden_states, @@ -389,24 +391,48 @@ def _pad_and_diagonalize(chunked_hidden_states): return chunked_hidden_states @staticmethod - def _chunk(hidden_states, window_overlap): + def _chunk(hidden_states, window_overlap, onnx_export=False): """convert into overlapping chunks. Chunk size = 2w, overlap size = w""" + if not onnx_export: + # non-overlapping chunks of size = 2w + hidden_states = hidden_states.view( + hidden_states.size(0), + torch.div(hidden_states.size(1), (window_overlap * 2), rounding_mode="trunc"), + window_overlap * 2, + hidden_states.size(2), + ) + # use `as_strided` to make the chunks overlap with an overlap size = window_overlap + chunk_size = list(hidden_states.size()) + chunk_size[1] = chunk_size[1] * 2 - 1 + + chunk_stride = list(hidden_states.stride()) + chunk_stride[1] = chunk_stride[1] // 2 + return hidden_states.as_strided(size=chunk_size, stride=chunk_stride) + + # When exporting to ONNX, use this separate logic + if hidden_states.size(1) == window_overlap * 2: + # simplest case + return hidden_states.unsqueeze(1) + else: + # have to use slow implementation since as_strided, unfold and 2d-tensor indexing aren't supported (yet) in ONNX export - # non-overlapping chunks of size = 2w - hidden_states = hidden_states.view( - hidden_states.size(0), - hidden_states.size(1) // (window_overlap * 2), - window_overlap * 2, - hidden_states.size(2), - ) + # TODO replace this with + # > return hidden_states.unfold(dimension=1, size=window_overlap * 2, step=window_overlap).transpose(2, 3) + # once `unfold` is supported - # use `as_strided` to make the chunks overlap with an overlap size = window_overlap - chunk_size = list(hidden_states.size()) - chunk_size[1] = chunk_size[1] * 2 - 1 + chunk_size = [ + hidden_states.size(0), + hidden_states.size(1) // window_overlap - 1, + window_overlap * 2, + hidden_states.size(2), + ] - chunk_stride = list(hidden_states.stride()) - chunk_stride[1] = chunk_stride[1] // 2 - return hidden_states.as_strided(size=chunk_size, stride=chunk_stride) + overlapping_chunks = torch.empty(chunk_size) + for chunk in range(chunk_size[1]): + overlapping_chunks[:, chunk, :, :] = hidden_states[ + :, chunk * window_overlap : chunk * window_overlap + 2 * window_overlap, : + ] + return overlapping_chunks @staticmethod def _mask_invalid_locations(input_tensor, affected_seq_len) -> torch.Tensor: @@ -415,10 +441,14 @@ def _mask_invalid_locations(input_tensor, affected_seq_len) -> torch.Tensor: ending_mask = beginning_mask.flip(dims=(1, 3)) beginning_input = input_tensor[:, :affected_seq_len, :, : affected_seq_len + 1] beginning_mask = beginning_mask.expand(beginning_input.size()) - beginning_input.masked_fill_(beginning_mask == 1, -float("inf")) # `== 1` converts to bool or uint8 + input_tensor[:, :affected_seq_len, :, : affected_seq_len + 1] = torch.full_like( + beginning_input, -float("inf") + ).where(beginning_mask.bool(), beginning_input) ending_input = input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1) :] ending_mask = ending_mask.expand(ending_input.size()) - ending_input.masked_fill_(ending_mask == 1, -float("inf")) # `== 1` converts to bool or uint8 + input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1) :] = torch.full_like( + ending_input, -float("inf") + ).where(ending_mask.bool(), ending_input) def _sliding_chunks_query_key_matmul(self, query: torch.Tensor, key: torch.Tensor, window_overlap: int): """ @@ -432,14 +462,14 @@ def _sliding_chunks_query_key_matmul(self, query: torch.Tensor, key: torch.Tenso ), f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}" assert query.size() == key.size() - chunks_count = seq_len // window_overlap - 1 + chunks_count = torch.div(seq_len, window_overlap, rounding_mode="trunc") - 1 # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size window_overlap * 2 query = query.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) key = key.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) - query = self._chunk(query, window_overlap) - key = self._chunk(key, window_overlap) + query = self._chunk(query, window_overlap, self.config.__dict__.get("onnx_export", False)) + key = self._chunk(key, window_overlap, self.config.__dict__.get("onnx_export", False)) # matrix multiplication # bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim @@ -457,7 +487,7 @@ def _sliding_chunks_query_key_matmul(self, query: torch.Tensor, key: torch.Tenso # window_overlap previous words). The following column is attention score from each word to itself, then # followed by window_overlap columns for the upper triangle. - diagonal_attention_scores = diagonal_chunked_attention_scores.new_empty( + diagonal_attention_scores = diagonal_chunked_attention_scores.new_zeros( (batch_size * num_heads, chunks_count + 1, window_overlap, window_overlap * 2 + 1) ) @@ -498,11 +528,14 @@ def _sliding_chunks_matmul_attn_probs_value( assert seq_len % (window_overlap * 2) == 0 assert attn_probs.size()[:3] == value.size()[:3] assert attn_probs.size(3) == 2 * window_overlap + 1 - chunks_count = seq_len // window_overlap - 1 + chunks_count = torch.div(seq_len, window_overlap, rounding_mode="trunc") - 1 # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size 2 window overlap chunked_attn_probs = attn_probs.transpose(1, 2).reshape( - batch_size * num_heads, seq_len // window_overlap, window_overlap, 2 * window_overlap + 1 + batch_size * num_heads, + torch.div(seq_len, window_overlap, rounding_mode="trunc"), + window_overlap, + 2 * window_overlap + 1, ) # group batch_size and num_heads dimensions into one @@ -577,9 +610,12 @@ def _concat_with_global_key_attn_probs( # (batch_size, seq_len, num_heads, max_num_global_attn_indices) attn_probs_from_global_key = torch.einsum("blhd,bshd->blhs", (query_vectors, key_vectors_only_global)) + # need to transpose since ONNX export only supports consecutive indexing: https://pytorch.org/docs/stable/onnx.html#writes-sets + attn_probs_from_global_key = attn_probs_from_global_key.transpose(1, 3) attn_probs_from_global_key[ - is_local_index_no_global_attn_nonzero[0], :, :, is_local_index_no_global_attn_nonzero[1] + is_local_index_no_global_attn_nonzero[0], is_local_index_no_global_attn_nonzero[1], :, : ] = torch.finfo(attn_probs_from_global_key.dtype).min + attn_probs_from_global_key = attn_probs_from_global_key.transpose(1, 3) return attn_probs_from_global_key @@ -673,9 +709,12 @@ def _compute_global_attn_output_from_hidden( global_attn_scores = global_attn_scores.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len) + # need to transpose since ONNX export only supports consecutive indexing: https://pytorch.org/docs/stable/onnx.html#writes-sets + global_attn_scores = global_attn_scores.transpose(1, 2) global_attn_scores[ - is_local_index_no_global_attn_nonzero[0], :, is_local_index_no_global_attn_nonzero[1], : + is_local_index_no_global_attn_nonzero[0], is_local_index_no_global_attn_nonzero[1], :, : ] = torch.finfo(global_attn_scores.dtype).min + global_attn_scores = global_attn_scores.transpose(1, 2) global_attn_scores = global_attn_scores.masked_fill( is_index_masked[:, None, None, :], diff --git a/src/transformers/models/longformer/configuration_longformer.py b/src/transformers/models/longformer/configuration_longformer.py index 53ceeafb64bad2..977ca3e639c50e 100644 --- a/src/transformers/models/longformer/configuration_longformer.py +++ b/src/transformers/models/longformer/configuration_longformer.py @@ -13,12 +13,20 @@ # See the License for the specific language governing permissions and # limitations under the License. """ Longformer configuration""" -from typing import List, Union +from collections import OrderedDict +from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union -from ...utils import logging +from ...onnx import OnnxConfig +from ...utils import TensorType, logging from ..roberta.configuration_roberta import RobertaConfig +if TYPE_CHECKING: + from ...configuration_utils import PretrainedConfig + from ...onnx.config import PatchingSpec + from ...tokenization_utils_base import PreTrainedTokenizerBase + + logger = logging.get_logger(__name__) LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = { @@ -71,6 +79,69 @@ class LongformerConfig(RobertaConfig): ```""" model_type = "longformer" - def __init__(self, attention_window: Union[List[int], int] = 512, sep_token_id: int = 2, **kwargs): + def __init__( + self, attention_window: Union[List[int], int] = 512, sep_token_id: int = 2, onnx_export: bool = False, **kwargs + ): super().__init__(sep_token_id=sep_token_id, **kwargs) self.attention_window = attention_window + self.onnx_export = onnx_export + + +class LongformerOnnxConfig(OnnxConfig): + def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None): + super().__init__(config, task, patching_specs) + config.onnx_export = True + + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + if self.task == "multiple-choice": + dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"} + else: + dynamic_axis = {0: "batch", 1: "sequence"} + return OrderedDict( + [ + ("input_ids", dynamic_axis), + ("attention_mask", dynamic_axis), + ("global_attention_mask", dynamic_axis), + ] + ) + + @property + def outputs(self) -> Mapping[str, Mapping[int, str]]: + outputs = super().outputs + if self.task == "default": + outputs["pooler_output"] = {0: "batch"} + return outputs + + @property + def atol_for_validation(self) -> float: + """ + What absolute tolerance value to use during model conversion validation. + + Returns: + Float absolute tolerance value. + """ + return 1e-4 + + @property + def default_onnx_opset(self) -> int: + # needs to be >= 14 to support tril operator + return max(super().default_onnx_opset, 14) + + def generate_dummy_inputs( + self, + tokenizer: "PreTrainedTokenizerBase", + batch_size: int = -1, + seq_length: int = -1, + is_pair: bool = False, + framework: Optional[TensorType] = None, + ) -> Mapping[str, Any]: + inputs = super().generate_dummy_inputs( + preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework + ) + import torch + + inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"]) + # make every second token global + inputs["global_attention_mask"][:, ::2] = 1 + return inputs diff --git a/src/transformers/models/longformer/modeling_longformer.py b/src/transformers/models/longformer/modeling_longformer.py index 7661f90bfbb48b..00cd227a68f6f0 100755 --- a/src/transformers/models/longformer/modeling_longformer.py +++ b/src/transformers/models/longformer/modeling_longformer.py @@ -532,6 +532,8 @@ def __init__(self, config, layer_id): self.one_sided_attn_window_size = attention_window // 2 + self.config = config + def forward( self, hidden_states, @@ -761,24 +763,48 @@ def _pad_and_diagonalize(chunked_hidden_states): return chunked_hidden_states @staticmethod - def _chunk(hidden_states, window_overlap): + def _chunk(hidden_states, window_overlap, onnx_export=False): """convert into overlapping chunks. Chunk size = 2w, overlap size = w""" + if not onnx_export: + # non-overlapping chunks of size = 2w + hidden_states = hidden_states.view( + hidden_states.size(0), + torch.div(hidden_states.size(1), (window_overlap * 2), rounding_mode="trunc"), + window_overlap * 2, + hidden_states.size(2), + ) + # use `as_strided` to make the chunks overlap with an overlap size = window_overlap + chunk_size = list(hidden_states.size()) + chunk_size[1] = chunk_size[1] * 2 - 1 + + chunk_stride = list(hidden_states.stride()) + chunk_stride[1] = chunk_stride[1] // 2 + return hidden_states.as_strided(size=chunk_size, stride=chunk_stride) + + # When exporting to ONNX, use this separate logic + if hidden_states.size(1) == window_overlap * 2: + # simplest case + return hidden_states.unsqueeze(1) + else: + # have to use slow implementation since as_strided, unfold and 2d-tensor indexing aren't supported (yet) in ONNX export - # non-overlapping chunks of size = 2w - hidden_states = hidden_states.view( - hidden_states.size(0), - hidden_states.size(1) // (window_overlap * 2), - window_overlap * 2, - hidden_states.size(2), - ) + # TODO replace this with + # > return hidden_states.unfold(dimension=1, size=window_overlap * 2, step=window_overlap).transpose(2, 3) + # once `unfold` is supported - # use `as_strided` to make the chunks overlap with an overlap size = window_overlap - chunk_size = list(hidden_states.size()) - chunk_size[1] = chunk_size[1] * 2 - 1 + chunk_size = [ + hidden_states.size(0), + hidden_states.size(1) // window_overlap - 1, + window_overlap * 2, + hidden_states.size(2), + ] - chunk_stride = list(hidden_states.stride()) - chunk_stride[1] = chunk_stride[1] // 2 - return hidden_states.as_strided(size=chunk_size, stride=chunk_stride) + overlapping_chunks = torch.empty(chunk_size) + for chunk in range(chunk_size[1]): + overlapping_chunks[:, chunk, :, :] = hidden_states[ + :, chunk * window_overlap : chunk * window_overlap + 2 * window_overlap, : + ] + return overlapping_chunks @staticmethod def _mask_invalid_locations(input_tensor, affected_seq_len) -> torch.Tensor: @@ -787,10 +813,14 @@ def _mask_invalid_locations(input_tensor, affected_seq_len) -> torch.Tensor: ending_mask = beginning_mask.flip(dims=(1, 3)) beginning_input = input_tensor[:, :affected_seq_len, :, : affected_seq_len + 1] beginning_mask = beginning_mask.expand(beginning_input.size()) - beginning_input.masked_fill_(beginning_mask == 1, -float("inf")) # `== 1` converts to bool or uint8 + input_tensor[:, :affected_seq_len, :, : affected_seq_len + 1] = torch.full_like( + beginning_input, -float("inf") + ).where(beginning_mask.bool(), beginning_input) ending_input = input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1) :] ending_mask = ending_mask.expand(ending_input.size()) - ending_input.masked_fill_(ending_mask == 1, -float("inf")) # `== 1` converts to bool or uint8 + input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1) :] = torch.full_like( + ending_input, -float("inf") + ).where(ending_mask.bool(), ending_input) def _sliding_chunks_query_key_matmul(self, query: torch.Tensor, key: torch.Tensor, window_overlap: int): """ @@ -804,14 +834,14 @@ def _sliding_chunks_query_key_matmul(self, query: torch.Tensor, key: torch.Tenso ), f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}" assert query.size() == key.size() - chunks_count = seq_len // window_overlap - 1 + chunks_count = torch.div(seq_len, window_overlap, rounding_mode="trunc") - 1 # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size window_overlap * 2 query = query.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) key = key.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) - query = self._chunk(query, window_overlap) - key = self._chunk(key, window_overlap) + query = self._chunk(query, window_overlap, self.config.__dict__.get("onnx_export", False)) + key = self._chunk(key, window_overlap, self.config.__dict__.get("onnx_export", False)) # matrix multiplication # bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim @@ -829,7 +859,7 @@ def _sliding_chunks_query_key_matmul(self, query: torch.Tensor, key: torch.Tenso # window_overlap previous words). The following column is attention score from each word to itself, then # followed by window_overlap columns for the upper triangle. - diagonal_attention_scores = diagonal_chunked_attention_scores.new_empty( + diagonal_attention_scores = diagonal_chunked_attention_scores.new_zeros( (batch_size * num_heads, chunks_count + 1, window_overlap, window_overlap * 2 + 1) ) @@ -870,11 +900,14 @@ def _sliding_chunks_matmul_attn_probs_value( assert seq_len % (window_overlap * 2) == 0 assert attn_probs.size()[:3] == value.size()[:3] assert attn_probs.size(3) == 2 * window_overlap + 1 - chunks_count = seq_len // window_overlap - 1 + chunks_count = torch.div(seq_len, window_overlap, rounding_mode="trunc") - 1 # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size 2 window overlap chunked_attn_probs = attn_probs.transpose(1, 2).reshape( - batch_size * num_heads, seq_len // window_overlap, window_overlap, 2 * window_overlap + 1 + batch_size * num_heads, + torch.div(seq_len, window_overlap, rounding_mode="trunc"), + window_overlap, + 2 * window_overlap + 1, ) # group batch_size and num_heads dimensions into one @@ -949,9 +982,12 @@ def _concat_with_global_key_attn_probs( # (batch_size, seq_len, num_heads, max_num_global_attn_indices) attn_probs_from_global_key = torch.einsum("blhd,bshd->blhs", (query_vectors, key_vectors_only_global)) + # need to transpose since ONNX export only supports consecutive indexing: https://pytorch.org/docs/stable/onnx.html#writes-sets + attn_probs_from_global_key = attn_probs_from_global_key.transpose(1, 3) attn_probs_from_global_key[ - is_local_index_no_global_attn_nonzero[0], :, :, is_local_index_no_global_attn_nonzero[1] + is_local_index_no_global_attn_nonzero[0], is_local_index_no_global_attn_nonzero[1], :, : ] = torch.finfo(attn_probs_from_global_key.dtype).min + attn_probs_from_global_key = attn_probs_from_global_key.transpose(1, 3) return attn_probs_from_global_key @@ -1045,9 +1081,12 @@ def _compute_global_attn_output_from_hidden( global_attn_scores = global_attn_scores.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len) + # need to transpose since ONNX export only supports consecutive indexing: https://pytorch.org/docs/stable/onnx.html#writes-sets + global_attn_scores = global_attn_scores.transpose(1, 2) global_attn_scores[ - is_local_index_no_global_attn_nonzero[0], :, is_local_index_no_global_attn_nonzero[1], : + is_local_index_no_global_attn_nonzero[0], is_local_index_no_global_attn_nonzero[1], :, : ] = torch.finfo(global_attn_scores.dtype).min + global_attn_scores = global_attn_scores.transpose(1, 2) global_attn_scores = global_attn_scores.masked_fill( is_index_masked[:, None, None, :], @@ -1588,7 +1627,7 @@ def _pad_to_window_size( inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2) attention_mask = nn.functional.pad( - attention_mask, (0, padding_len), value=False + attention_mask, (0, padding_len), value=0 ) # no attention on the padding tokens token_type_ids = nn.functional.pad(token_type_ids, (0, padding_len), value=0) # pad with token_type_id = 0 diff --git a/src/transformers/onnx/features.py b/src/transformers/onnx/features.py index 3596fe1840094f..3f18c369832bd0 100644 --- a/src/transformers/onnx/features.py +++ b/src/transformers/onnx/features.py @@ -358,6 +358,15 @@ class FeaturesManager: "seq2seq-lm-with-past", onnx_config_cls="models.longt5.LongT5OnnxConfig", ), + "longformer": supported_features_mapping( + "default", + "masked-lm", + "multiple-choice", + "question-answering", + "sequence-classification", + "token-classification", + onnx_config_cls="models.longformer.LongformerOnnxConfig", + ), "marian": supported_features_mapping( "default", "default-with-past", diff --git a/tests/onnx/test_onnx_v2.py b/tests/onnx/test_onnx_v2.py index 829c7ec0a42ab0..52ced984ca8007 100644 --- a/tests/onnx/test_onnx_v2.py +++ b/tests/onnx/test_onnx_v2.py @@ -212,6 +212,7 @@ def test_values_override(self): ("data2vec-vision", "facebook/data2vec-vision-base"), ("perceiver", "deepmind/language-perceiver", ("masked-lm", "sequence-classification")), ("perceiver", "deepmind/vision-perceiver-conv", ("image-classification",)), + ("longformer", "allenai/longformer-base-4096"), ("yolos", "hustvl/yolos-tiny"), } From fbf382c84da4506484a23e85bd8540da5192ff4e Mon Sep 17 00:00:00 2001 From: Craig Chan <46288912+rachthree@users.noreply.github.com> Date: Thu, 25 Aug 2022 07:31:34 -0700 Subject: [PATCH 143/539] Determine framework automatically before ONNX export (#18615) * Automatic detection for framework to use when exporting to ONNX * Log message change * Incorporating PR comments, adding unit test * Adding tf for pip install for run_tests_onnxruntime CI * Restoring past changes to circleci yaml and test_onnx_v2.py, tests moved to tests/onnx/test_features.py * Fixup * Adding test to fetcher * Updating circleci config to log more * Changing test class name * Comment typo fix in tests/onnx/test_features.py Co-authored-by: lewtun * Moving torch_str/tf_str to self.framework_pt/tf * Remove -rA flag in circleci config Co-authored-by: lewtun --- .circleci/config.yml | 4 +- src/transformers/onnx/__main__.py | 10 ++- src/transformers/onnx/features.py | 63 +++++++++++++++-- tests/onnx/test_features.py | 111 ++++++++++++++++++++++++++++++ utils/tests_fetcher.py | 2 +- 5 files changed, 182 insertions(+), 8 deletions(-) create mode 100644 tests/onnx/test_features.py diff --git a/.circleci/config.yml b/.circleci/config.yml index 3b895d0dd17100..a8fcedad3f47c4 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -878,7 +878,7 @@ jobs: - v0.5-torch-{{ checksum "setup.py" }} - v0.5-{{ checksum "setup.py" }} - run: pip install --upgrade pip - - run: pip install .[torch,testing,sentencepiece,onnxruntime,vision,rjieba] + - run: pip install .[torch,tf,testing,sentencepiece,onnxruntime,vision,rjieba] - save_cache: key: v0.5-onnx-{{ checksum "setup.py" }} paths: @@ -912,7 +912,7 @@ jobs: - v0.5-torch-{{ checksum "setup.py" }} - v0.5-{{ checksum "setup.py" }} - run: pip install --upgrade pip - - run: pip install .[torch,testing,sentencepiece,onnxruntime,vision] + - run: pip install .[torch,tf,testing,sentencepiece,onnxruntime,vision] - save_cache: key: v0.5-onnx-{{ checksum "setup.py" }} paths: diff --git a/src/transformers/onnx/__main__.py b/src/transformers/onnx/__main__.py index 6d665b35566f2c..55ad5f54c99494 100644 --- a/src/transformers/onnx/__main__.py +++ b/src/transformers/onnx/__main__.py @@ -38,7 +38,15 @@ def main(): "--atol", type=float, default=None, help="Absolute difference tolerence when validating the model." ) parser.add_argument( - "--framework", type=str, choices=["pt", "tf"], default="pt", help="The framework to use for the ONNX export." + "--framework", + type=str, + choices=["pt", "tf"], + default=None, + help=( + "The framework to use for the ONNX export." + " If not provided, will attempt to use the local checkpoint's original framework" + " or what is available in the environment." + ), ) parser.add_argument("output", type=Path, help="Path indicating where to store generated ONNX model.") parser.add_argument("--cache_dir", type=str, default=None, help="Path indicating where to store cache.") diff --git a/src/transformers/onnx/features.py b/src/transformers/onnx/features.py index 3f18c369832bd0..eb57df1c960364 100644 --- a/src/transformers/onnx/features.py +++ b/src/transformers/onnx/features.py @@ -1,10 +1,11 @@ +import os from functools import partial, reduce from typing import TYPE_CHECKING, Callable, Dict, Optional, Tuple, Type, Union import transformers from .. import PretrainedConfig, is_tf_available, is_torch_available -from ..utils import logging +from ..utils import TF2_WEIGHTS_NAME, WEIGHTS_NAME, logging from .config import OnnxConfig @@ -566,9 +567,59 @@ def get_model_class_for_feature(feature: str, framework: str = "pt") -> Type: ) return task_to_automodel[task] + @staticmethod + def determine_framework(model: str, framework: str = None) -> str: + """ + Determines the framework to use for the export. + + The priority is in the following order: + 1. User input via `framework`. + 2. If local checkpoint is provided, use the same framework as the checkpoint. + 3. Available framework in environment, with priority given to PyTorch + + Args: + model (`str`): + The name of the model to export. + framework (`str`, *optional*, defaults to `None`): + The framework to use for the export. See above for priority if none provided. + + Returns: + The framework to use for the export. + + """ + if framework is not None: + return framework + + framework_map = {"pt": "PyTorch", "tf": "TensorFlow"} + exporter_map = {"pt": "torch", "tf": "tf2onnx"} + + if os.path.isdir(model): + if os.path.isfile(os.path.join(model, WEIGHTS_NAME)): + framework = "pt" + elif os.path.isfile(os.path.join(model, TF2_WEIGHTS_NAME)): + framework = "tf" + else: + raise FileNotFoundError( + "Cannot determine framework from given checkpoint location." + f" There should be a {WEIGHTS_NAME} for PyTorch" + f" or {TF2_WEIGHTS_NAME} for TensorFlow." + ) + logger.info(f"Local {framework_map[framework]} model found.") + else: + if is_torch_available(): + framework = "pt" + elif is_tf_available(): + framework = "tf" + else: + raise EnvironmentError("Neither PyTorch nor TensorFlow found in environment. Cannot export to ONNX.") + + logger.info(f"Framework not requested. Using {exporter_map[framework]} to export to ONNX.") + + return framework + @staticmethod def get_model_from_feature( - feature: str, model: str, framework: str = "pt", cache_dir: str = None + feature: str, model: str, framework: str = None, cache_dir: str = None ) -> Union["PreTrainedModel", "TFPreTrainedModel"]: """ Attempts to retrieve a model from a model's name and the feature to be enabled. @@ -578,20 +629,24 @@ def get_model_from_feature( The feature required. model (`str`): The name of the model to export. - framework (`str`, *optional*, defaults to `"pt"`): - The framework to use for the export. + framework (`str`, *optional*, defaults to `None`): + The framework to use for the export. See `FeaturesManager.determine_framework` for the priority should + none be provided. Returns: The instance of the model. """ + framework = FeaturesManager.determine_framework(model, framework) model_class = FeaturesManager.get_model_class_for_feature(feature, framework) try: model = model_class.from_pretrained(model, cache_dir=cache_dir) except OSError: if framework == "pt": + logger.info("Loading TensorFlow model in PyTorch before exporting to ONNX.") model = model_class.from_pretrained(model, from_tf=True, cache_dir=cache_dir) else: + logger.info("Loading PyTorch model in TensorFlow before exporting to ONNX.") model = model_class.from_pretrained(model, from_pt=True, cache_dir=cache_dir) return model diff --git a/tests/onnx/test_features.py b/tests/onnx/test_features.py new file mode 100644 index 00000000000000..4590ff0cc86cd3 --- /dev/null +++ b/tests/onnx/test_features.py @@ -0,0 +1,111 @@ +from tempfile import TemporaryDirectory +from unittest import TestCase +from unittest.mock import MagicMock, patch + +from transformers import AutoModel, TFAutoModel +from transformers.onnx import FeaturesManager +from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch + + +@require_torch +@require_tf +class DetermineFrameworkTest(TestCase): + """ + Test `FeaturesManager.determine_framework` + """ + + def setUp(self): + self.test_model = SMALL_MODEL_IDENTIFIER + self.framework_pt = "pt" + self.framework_tf = "tf" + + def _setup_pt_ckpt(self, save_dir): + model_pt = AutoModel.from_pretrained(self.test_model) + model_pt.save_pretrained(save_dir) + + def _setup_tf_ckpt(self, save_dir): + model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True) + model_tf.save_pretrained(save_dir) + + def test_framework_provided(self): + """ + Ensure the that the provided framework is returned. + """ + mock_framework = "mock_framework" + + # Framework provided - return whatever the user provides + result = FeaturesManager.determine_framework(self.test_model, mock_framework) + self.assertEqual(result, mock_framework) + + # Local checkpoint and framework provided - return provided framework + # PyTorch checkpoint + with TemporaryDirectory() as local_pt_ckpt: + self._setup_pt_ckpt(local_pt_ckpt) + result = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework) + self.assertEqual(result, mock_framework) + + # TensorFlow checkpoint + with TemporaryDirectory() as local_tf_ckpt: + self._setup_tf_ckpt(local_tf_ckpt) + result = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework) + self.assertEqual(result, mock_framework) + + def test_checkpoint_provided(self): + """ + Ensure that the determined framework is the one used for the local checkpoint. + + For the functionality to execute, local checkpoints are provided but framework is not. + """ + # PyTorch checkpoint + with TemporaryDirectory() as local_pt_ckpt: + self._setup_pt_ckpt(local_pt_ckpt) + result = FeaturesManager.determine_framework(local_pt_ckpt) + self.assertEqual(result, self.framework_pt) + + # TensorFlow checkpoint + with TemporaryDirectory() as local_tf_ckpt: + self._setup_tf_ckpt(local_tf_ckpt) + result = FeaturesManager.determine_framework(local_tf_ckpt) + self.assertEqual(result, self.framework_tf) + + # Invalid local checkpoint + with TemporaryDirectory() as local_invalid_ckpt: + with self.assertRaises(FileNotFoundError): + result = FeaturesManager.determine_framework(local_invalid_ckpt) + + def test_from_environment(self): + """ + Ensure that the determined framework is the one available in the environment. + + For the functionality to execute, framework and local checkpoints are not provided. + """ + # Framework not provided, hub model is used (no local checkpoint directory) + # TensorFlow not in environment -> use PyTorch + mock_tf_available = MagicMock(return_value=False) + with patch("transformers.onnx.features.is_tf_available", mock_tf_available): + result = FeaturesManager.determine_framework(self.test_model) + self.assertEqual(result, self.framework_pt) + + # PyTorch not in environment -> use TensorFlow + mock_torch_available = MagicMock(return_value=False) + with patch("transformers.onnx.features.is_torch_available", mock_torch_available): + result = FeaturesManager.determine_framework(self.test_model) + self.assertEqual(result, self.framework_tf) + + # Both in environment -> use PyTorch + mock_tf_available = MagicMock(return_value=True) + mock_torch_available = MagicMock(return_value=True) + with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch( + "transformers.onnx.features.is_torch_available", mock_torch_available + ): + result = FeaturesManager.determine_framework(self.test_model) + self.assertEqual(result, self.framework_pt) + + # Both not in environment -> raise error + mock_tf_available = MagicMock(return_value=False) + mock_torch_available = MagicMock(return_value=False) + with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch( + "transformers.onnx.features.is_torch_available", mock_torch_available + ): + with self.assertRaises(EnvironmentError): + result = FeaturesManager.determine_framework(self.test_model) diff --git a/utils/tests_fetcher.py b/utils/tests_fetcher.py index ba122f43f805db..7a645bba12372d 100644 --- a/utils/tests_fetcher.py +++ b/utils/tests_fetcher.py @@ -434,7 +434,7 @@ def module_to_test_file(module_fname): return "tests/utils/test_cli.py" # Special case for onnx submodules elif len(splits) >= 2 and splits[-2] == "onnx": - return ["tests/onnx/test_onnx.py", "tests/onnx/test_onnx_v2.py"] + return ["tests/onnx/test_features.py", "tests/onnx/test_onnx.py", "tests/onnx/test_onnx_v2.py"] # Special case for utils (not the one in src/transformers, the ones at the root of the repo). elif len(splits) > 0 and splits[0] == "utils": default_test_file = f"tests/utils/test_utils_{module_name}" From e9442440fcd2c838f5991b29a9d08ee59b9ec180 Mon Sep 17 00:00:00 2001 From: Rahul A R Date: Thu, 25 Aug 2022 11:00:38 -0400 Subject: [PATCH 144/539] streamlining 'checkpointing_steps' parsing (#18755) --- .../run_image_classification_no_trainer.py | 9 +++------ examples/pytorch/language-modeling/run_clm_no_trainer.py | 9 +++------ examples/pytorch/language-modeling/run_mlm_no_trainer.py | 9 +++------ examples/pytorch/multiple-choice/run_swag_no_trainer.py | 9 +++------ .../question-answering/run_qa_beam_search_no_trainer.py | 9 +++------ examples/pytorch/question-answering/run_qa_no_trainer.py | 9 +++------ .../run_semantic_segmentation_no_trainer.py | 9 +++------ .../pytorch/text-classification/run_glue_no_trainer.py | 9 +++------ .../pytorch/token-classification/run_ner_no_trainer.py | 9 +++------ .../pytorch/translation/run_translation_no_trainer.py | 9 +++------ 10 files changed, 30 insertions(+), 60 deletions(-) diff --git a/examples/pytorch/image-classification/run_image_classification_no_trainer.py b/examples/pytorch/image-classification/run_image_classification_no_trainer.py index 69ee2875e61d1c..7037ab6c82bff5 100644 --- a/examples/pytorch/image-classification/run_image_classification_no_trainer.py +++ b/examples/pytorch/image-classification/run_image_classification_no_trainer.py @@ -406,12 +406,9 @@ def collate_fn(examples): args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Figure out how many steps we should save the Accelerator states - if hasattr(args.checkpointing_steps, "isdigit"): - checkpointing_steps = args.checkpointing_steps - if args.checkpointing_steps.isdigit(): - checkpointing_steps = int(args.checkpointing_steps) - else: - checkpointing_steps = None + checkpointing_steps = args.checkpointing_steps + if checkpointing_steps is not None and checkpointing_steps.isdigit(): + checkpointing_steps = int(checkpointing_steps) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. diff --git a/examples/pytorch/language-modeling/run_clm_no_trainer.py b/examples/pytorch/language-modeling/run_clm_no_trainer.py index d00d0792ab90ee..f5ea78f8328967 100755 --- a/examples/pytorch/language-modeling/run_clm_no_trainer.py +++ b/examples/pytorch/language-modeling/run_clm_no_trainer.py @@ -508,12 +508,9 @@ def group_texts(examples): args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Figure out how many steps we should save the Accelerator states - if hasattr(args.checkpointing_steps, "isdigit"): - checkpointing_steps = args.checkpointing_steps - if args.checkpointing_steps.isdigit(): - checkpointing_steps = int(args.checkpointing_steps) - else: - checkpointing_steps = None + checkpointing_steps = args.checkpointing_steps + if checkpointing_steps is not None and checkpointing_steps.isdigit(): + checkpointing_steps = int(checkpointing_steps) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. diff --git a/examples/pytorch/language-modeling/run_mlm_no_trainer.py b/examples/pytorch/language-modeling/run_mlm_no_trainer.py index 7da09e16c5914e..9dd519d11e3d10 100755 --- a/examples/pytorch/language-modeling/run_mlm_no_trainer.py +++ b/examples/pytorch/language-modeling/run_mlm_no_trainer.py @@ -552,12 +552,9 @@ def group_texts(examples): args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Figure out how many steps we should save the Accelerator states - if hasattr(args.checkpointing_steps, "isdigit"): - checkpointing_steps = args.checkpointing_steps - if args.checkpointing_steps.isdigit(): - checkpointing_steps = int(args.checkpointing_steps) - else: - checkpointing_steps = None + checkpointing_steps = args.checkpointing_steps + if checkpointing_steps is not None and checkpointing_steps.isdigit(): + checkpointing_steps = int(checkpointing_steps) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. diff --git a/examples/pytorch/multiple-choice/run_swag_no_trainer.py b/examples/pytorch/multiple-choice/run_swag_no_trainer.py index 4cf2c9cc440204..aed2ad8aa99ac1 100755 --- a/examples/pytorch/multiple-choice/run_swag_no_trainer.py +++ b/examples/pytorch/multiple-choice/run_swag_no_trainer.py @@ -505,12 +505,9 @@ def preprocess_function(examples): args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Figure out how many steps we should save the Accelerator states - if hasattr(args.checkpointing_steps, "isdigit"): - checkpointing_steps = args.checkpointing_steps - if args.checkpointing_steps.isdigit(): - checkpointing_steps = int(args.checkpointing_steps) - else: - checkpointing_steps = None + checkpointing_steps = args.checkpointing_steps + if checkpointing_steps is not None and checkpointing_steps.isdigit(): + checkpointing_steps = int(checkpointing_steps) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. diff --git a/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py b/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py index 370dd3f43d9545..c3fdcdae9a8f12 100644 --- a/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py +++ b/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py @@ -764,12 +764,9 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len): args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Figure out how many steps we should save the Accelerator states - if hasattr(args.checkpointing_steps, "isdigit"): - checkpointing_steps = args.checkpointing_steps - if args.checkpointing_steps.isdigit(): - checkpointing_steps = int(args.checkpointing_steps) - else: - checkpointing_steps = None + checkpointing_steps = args.checkpointing_steps + if checkpointing_steps is not None and checkpointing_steps.isdigit(): + checkpointing_steps = int(checkpointing_steps) # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: diff --git a/examples/pytorch/question-answering/run_qa_no_trainer.py b/examples/pytorch/question-answering/run_qa_no_trainer.py index fb6e1e96d52582..926e24c4dd7b93 100755 --- a/examples/pytorch/question-answering/run_qa_no_trainer.py +++ b/examples/pytorch/question-answering/run_qa_no_trainer.py @@ -779,12 +779,9 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len): args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Figure out how many steps we should save the Accelerator states - if hasattr(args.checkpointing_steps, "isdigit"): - checkpointing_steps = args.checkpointing_steps - if args.checkpointing_steps.isdigit(): - checkpointing_steps = int(args.checkpointing_steps) - else: - checkpointing_steps = None + checkpointing_steps = args.checkpointing_steps + if checkpointing_steps is not None and checkpointing_steps.isdigit(): + checkpointing_steps = int(checkpointing_steps) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. diff --git a/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py b/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py index a6550176aa37dc..dc1dba2f233b8b 100644 --- a/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py +++ b/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py @@ -475,12 +475,9 @@ def preprocess_val(example_batch): ) # Figure out how many steps we should save the Accelerator states - if hasattr(args.checkpointing_steps, "isdigit"): - checkpointing_steps = args.checkpointing_steps - if args.checkpointing_steps.isdigit(): - checkpointing_steps = int(args.checkpointing_steps) - else: - checkpointing_steps = None + checkpointing_steps = args.checkpointing_steps + if checkpointing_steps is not None and checkpointing_steps.isdigit(): + checkpointing_steps = int(checkpointing_steps) # Scheduler and math around the number of training steps. overrode_max_train_steps = False diff --git a/examples/pytorch/text-classification/run_glue_no_trainer.py b/examples/pytorch/text-classification/run_glue_no_trainer.py index 1213460cfb970e..3720c9d09bb451 100644 --- a/examples/pytorch/text-classification/run_glue_no_trainer.py +++ b/examples/pytorch/text-classification/run_glue_no_trainer.py @@ -451,12 +451,9 @@ def preprocess_function(examples): args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Figure out how many steps we should save the Accelerator states - if hasattr(args.checkpointing_steps, "isdigit"): - checkpointing_steps = args.checkpointing_steps - if args.checkpointing_steps.isdigit(): - checkpointing_steps = int(args.checkpointing_steps) - else: - checkpointing_steps = None + checkpointing_steps = args.checkpointing_steps + if checkpointing_steps is not None and checkpointing_steps.isdigit(): + checkpointing_steps = int(checkpointing_steps) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. diff --git a/examples/pytorch/token-classification/run_ner_no_trainer.py b/examples/pytorch/token-classification/run_ner_no_trainer.py index 1156e1f4f01bed..72b9ed3af4b833 100755 --- a/examples/pytorch/token-classification/run_ner_no_trainer.py +++ b/examples/pytorch/token-classification/run_ner_no_trainer.py @@ -566,12 +566,9 @@ def tokenize_and_align_labels(examples): args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Figure out how many steps we should save the Accelerator states - if hasattr(args.checkpointing_steps, "isdigit"): - checkpointing_steps = args.checkpointing_steps - if args.checkpointing_steps.isdigit(): - checkpointing_steps = int(args.checkpointing_steps) - else: - checkpointing_steps = None + checkpointing_steps = args.checkpointing_steps + if checkpointing_steps is not None and checkpointing_steps.isdigit(): + checkpointing_steps = int(checkpointing_steps) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. diff --git a/examples/pytorch/translation/run_translation_no_trainer.py b/examples/pytorch/translation/run_translation_no_trainer.py index 34c2ad1964090f..d16c7d5f1c8fd5 100644 --- a/examples/pytorch/translation/run_translation_no_trainer.py +++ b/examples/pytorch/translation/run_translation_no_trainer.py @@ -549,12 +549,9 @@ def preprocess_function(examples): # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Figure out how many steps we should save the Accelerator states - if hasattr(args.checkpointing_steps, "isdigit"): - checkpointing_steps = args.checkpointing_steps - if args.checkpointing_steps.isdigit(): - checkpointing_steps = int(args.checkpointing_steps) - else: - checkpointing_steps = None + checkpointing_steps = args.checkpointing_steps + if checkpointing_steps is not None and checkpointing_steps.isdigit(): + checkpointing_steps = int(checkpointing_steps) # We need to initialize the trackers we use, and also store our configuration. # We initialize the trackers only on main process because `accelerator.log` From 06a6a4bd516f7d0ba7c4966a2d3d9c0bf07797ae Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Thu, 25 Aug 2022 17:08:05 +0100 Subject: [PATCH 145/539] CLI: Improved error control and updated hub requirement (#18752) --- src/transformers/commands/pt_to_tf.py | 40 +++++++++++++++------------ 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/src/transformers/commands/pt_to_tf.py b/src/transformers/commands/pt_to_tf.py index fdd423430ee212..c126eeefcc063f 100644 --- a/src/transformers/commands/pt_to_tf.py +++ b/src/transformers/commands/pt_to_tf.py @@ -59,7 +59,7 @@ def convert_command_factory(args: Namespace): return PTtoTFCommand( args.model_name, args.local_dir, - args.max_hidden_error, + args.max_error, args.new_weights, args.no_pr, args.push, @@ -96,12 +96,11 @@ def register_subcommand(parser: ArgumentParser): help="Optional local directory of the model repository. Defaults to /tmp/{model_name}", ) train_parser.add_argument( - "--max-hidden-error", + "--max-error", type=float, default=MAX_ERROR, help=( - f"Maximum error tolerance for hidden layer outputs. Defaults to {MAX_ERROR}. If you suspect the hidden" - " layers outputs will be used for downstream applications, avoid increasing this tolerance." + f"Maximum error tolerance. Defaults to {MAX_ERROR}. This flag should be avoided, use at your own risk." ), ) train_parser.add_argument( @@ -168,7 +167,7 @@ def __init__( self, model_name: str, local_dir: str, - max_hidden_error: float, + max_error: float, new_weights: bool, no_pr: bool, push: bool, @@ -178,7 +177,7 @@ def __init__( self._logger = logging.get_logger("transformers-cli/pt_to_tf") self._model_name = model_name self._local_dir = local_dir if local_dir else os.path.join("/tmp", model_name) - self._max_hidden_error = max_hidden_error + self._max_error = max_error self._new_weights = new_weights self._no_pr = no_pr self._push = push @@ -239,9 +238,10 @@ def _get_audio_input(): return pt_input, tf_input def run(self): - if version.parse(huggingface_hub.__version__) < version.parse("0.8.1"): + # hub version 0.9.0 introduced the possibility of programmatically opening PRs with normal write tokens. + if version.parse(huggingface_hub.__version__) < version.parse("0.9.0"): raise ImportError( - "The huggingface_hub version must be >= 0.8.1 to use this command. Please update your huggingface_hub" + "The huggingface_hub version must be >= 0.9.0 to use this command. Please update your huggingface_hub" " installation." ) else: @@ -293,13 +293,13 @@ def run(self): ) max_crossload_output_diff = max(output_differences.values()) if output_differences else 0.0 max_crossload_hidden_diff = max(hidden_differences.values()) - if max_crossload_output_diff > MAX_ERROR or max_crossload_hidden_diff > self._max_hidden_error: + if max_crossload_output_diff > self._max_error or max_crossload_hidden_diff > self._max_error: raise ValueError( "The cross-loaded TensorFlow model has different outputs, something went wrong!\n" - + f"\nList of maximum output differences above the threshold ({MAX_ERROR}):\n" - + "\n".join([f"{k}: {v:.3e}" for k, v in output_differences.items() if v > MAX_ERROR]) - + f"\n\nList of maximum hidden layer differences above the threshold ({self._max_hidden_error}):\n" - + "\n".join([f"{k}: {v:.3e}" for k, v in hidden_differences.items() if v > self._max_hidden_error]) + + f"\nList of maximum output differences above the threshold ({self._max_error}):\n" + + "\n".join([f"{k}: {v:.3e}" for k, v in output_differences.items() if v > self._max_error]) + + f"\n\nList of maximum hidden layer differences above the threshold ({self._max_error}):\n" + + "\n".join([f"{k}: {v:.3e}" for k, v in hidden_differences.items() if v > self._max_error]) ) # Save the weights in a TF format (if needed) and confirms that the results are still good @@ -322,13 +322,13 @@ def run(self): ) max_conversion_output_diff = max(output_differences.values()) if output_differences else 0.0 max_conversion_hidden_diff = max(hidden_differences.values()) - if max_conversion_output_diff > MAX_ERROR or max_conversion_hidden_diff > self._max_hidden_error: + if max_conversion_output_diff > self._max_error or max_conversion_hidden_diff > self._max_error: raise ValueError( "The converted TensorFlow model has different outputs, something went wrong!\n" - + f"\nList of maximum output differences above the threshold ({MAX_ERROR}):\n" - + "\n".join([f"{k}: {v:.3e}" for k, v in output_differences.items() if v > MAX_ERROR]) - + f"\n\nList of maximum hidden layer differences above the threshold ({self._max_hidden_error}):\n" - + "\n".join([f"{k}: {v:.3e}" for k, v in hidden_differences.items() if v > self._max_hidden_error]) + + f"\nList of maximum output differences above the threshold ({self._max_error}):\n" + + "\n".join([f"{k}: {v:.3e}" for k, v in output_differences.items() if v > self._max_error]) + + f"\n\nList of maximum hidden layer differences above the threshold ({self._max_error}):\n" + + "\n".join([f"{k}: {v:.3e}" for k, v in hidden_differences.items() if v > self._max_error]) ) commit_message = "Update TF weights" if self._new_weights else "Add TF weights" @@ -348,6 +348,10 @@ def run(self): f"Maximum conversion output difference={max_conversion_output_diff:.3e}; " f"Maximum conversion hidden layer difference={max_conversion_hidden_diff:.3e};\n" ) + if self._max_error > MAX_ERROR: + commit_descrition += ( + f"\n\nCAUTION: The maximum admissible error was manually increased to {self._max_error}!" + ) if self._extra_commit_description: commit_descrition += "\n\n" + self._extra_commit_description From 8869bf41feaabeb2f99aa3975fee74be71d5fc03 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Fri, 26 Aug 2022 14:11:27 +0200 Subject: [PATCH 146/539] [VisionEncoderDecoder] Add gradient checkpointing (#18697) * add first generation tutorial * VisionEnocderDecoder gradient checkpointing * remove generation * add tests --- .../modeling_vision_encoder_decoder.py | 6 +++++ .../test_modeling_speech_encoder_decoder.py | 23 +++++++++++++++++++ .../test_modeling_vision_encoder_decoder.py | 23 +++++++++++++++++++ 3 files changed, 52 insertions(+) diff --git a/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py b/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py index 4906c13a9c0276..a963561c2825c6 100644 --- a/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py +++ b/src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.py @@ -155,6 +155,7 @@ class VisionEncoderDecoderModel(PreTrainedModel): config_class = VisionEncoderDecoderConfig base_model_prefix = "vision_encoder_decoder" main_input_name = "pixel_values" + supports_gradient_checkpointing = True def __init__( self, @@ -221,6 +222,11 @@ def __init__( f"The encoder {self.encoder} should not have a LM Head. Please use a model without LM Head" ) + def _set_gradient_checkpointing(self, module, value=False): + # call both encoder and decoder function on gradient checkpointing + self.encoder._set_gradient_checkpointing(module, value=value) + self.decoder._set_gradient_checkpointing(module, value=value) + def get_encoder(self): return self.encoder diff --git a/tests/models/speech_encoder_decoder/test_modeling_speech_encoder_decoder.py b/tests/models/speech_encoder_decoder/test_modeling_speech_encoder_decoder.py index 2d934744f9e424..f2415013026c72 100644 --- a/tests/models/speech_encoder_decoder/test_modeling_speech_encoder_decoder.py +++ b/tests/models/speech_encoder_decoder/test_modeling_speech_encoder_decoder.py @@ -396,6 +396,28 @@ def test_encoder_decoder_model_generate(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_generate(**input_ids_dict) + def test_training_gradient_checkpointing(self): + inputs_dict = self.prepare_config_and_inputs() + encoder_model, decoder_model = self.get_encoder_decoder_model( + inputs_dict["config"], inputs_dict["decoder_config"] + ) + + model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) + model.train() + model.gradient_checkpointing_enable() + model.config.decoder_start_token_id = 0 + model.config.pad_token_id = 0 + + model_inputs = { + "attention_mask": inputs_dict["attention_mask"], + "labels": inputs_dict["labels"], + "decoder_input_ids": inputs_dict["decoder_input_ids"], + } + inputs = inputs_dict["input_features"] if "input_features" in inputs_dict else inputs_dict["input_values"] + + loss = model(inputs, **model_inputs).loss + loss.backward() + @slow def test_real_model_save_load_from_pretrained(self): model_2, inputs = self.get_pretrained_model_and_inputs() @@ -590,6 +612,7 @@ def prepare_config_and_inputs(self): "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, + "labels": decoder_input_ids, } # there are no published pretrained Speech2Text2ForCausalLM for now diff --git a/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py b/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py index 7570888097c533..fbac8b898a6ec9 100644 --- a/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py +++ b/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py @@ -324,6 +324,27 @@ def test_encoder_decoder_model_generate(self): input_ids_dict = self.prepare_config_and_inputs() self.check_encoder_decoder_model_generate(**input_ids_dict) + def test_training_gradient_checkpointing(self): + inputs_dict = self.prepare_config_and_inputs() + encoder_model, decoder_model = self.get_encoder_decoder_model( + inputs_dict["config"], inputs_dict["decoder_config"] + ) + + model = VisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) + model.train() + model.gradient_checkpointing_enable() + model.config.decoder_start_token_id = 0 + model.config.pad_token_id = 0 + + model_inputs = { + "pixel_values": inputs_dict["pixel_values"], + "labels": inputs_dict["labels"], + "decoder_input_ids": inputs_dict["decoder_input_ids"], + } + + loss = model(**model_inputs).loss + loss.backward() + @slow def test_real_model_save_load_from_pretrained(self): model_2, inputs = self.get_pretrained_model_and_inputs() @@ -547,6 +568,7 @@ def prepare_config_and_inputs(self): decoder_config_and_inputs = model_tester_decoder.prepare_config_and_inputs() config, pixel_values, _ = encoder_config_and_inputs decoder_config, decoder_inputs_dict = decoder_config_and_inputs + decoder_inputs_dict["labels"] = decoder_inputs_dict["decoder_input_ids"] # make sure that cross attention layers are added decoder_config.add_cross_attention = True @@ -644,6 +666,7 @@ def prepare_config_and_inputs(self): "decoder_config": decoder_config, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, + "labels": decoder_input_ids, } # there are no published pretrained TrOCR checkpoints for now From 62ceb4d661ce644ee9377ac8053cbb9afa737125 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Fri, 26 Aug 2022 14:11:55 +0200 Subject: [PATCH 147/539] [Wav2vec2 + LM Test] Improve wav2vec2 with lm tests and make torch version dependent for now (#18749) * add first generation tutorial * remove generation * make version dependent expected values * Apply suggestions from code review * Update tests/models/wav2vec2_with_lm/test_processor_wav2vec2_with_lm.py * fix typo --- .../test_processor_wav2vec2_with_lm.py | 29 +++++++++---------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/tests/models/wav2vec2_with_lm/test_processor_wav2vec2_with_lm.py b/tests/models/wav2vec2_with_lm/test_processor_wav2vec2_with_lm.py index d66a5923868dc5..6bf52d3e1b1bc9 100644 --- a/tests/models/wav2vec2_with_lm/test_processor_wav2vec2_with_lm.py +++ b/tests/models/wav2vec2_with_lm/test_processor_wav2vec2_with_lm.py @@ -23,6 +23,7 @@ import datasets import numpy as np from datasets import load_dataset +from packaging import version from transformers import AutoProcessor from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor @@ -435,21 +436,19 @@ def test_word_time_stamp_integration(self): self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text) # output times - start_times = [round(x, 2) for x in self.get_from_offsets(word_time_stamps, "start_time")] - end_times = [round(x, 2) for x in self.get_from_offsets(word_time_stamps, "end_time")] + start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time")) + end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time")) # fmt: off - self.assertListEqual( - start_times, - [ - 1.42, 1.64, 2.12, 2.26, 2.54, 3.0, 3.24, 3.6, 3.8, 4.1, 4.26, 4.94, 5.28, 5.66, 5.78, 5.94, 6.32, 6.54, 6.66, - ], - ) - - self.assertListEqual( - end_times, - [ - 1.54, 1.88, 2.14, 2.46, 2.9, 3.18, 3.54, 3.72, 4.02, 4.18, 4.76, 5.16, 5.56, 5.7, 5.86, 6.2, 6.38, 6.62, 6.94, - ], - ) + expected_start_tensor = torch.tensor([1.42, 1.64, 2.12, 2.26, 2.54, 3.0, 3.24, 3.6, 3.8, 4.1, 4.26, 4.94, 5.28, 5.66, 5.78, 5.94, 6.32, 6.54, 6.66]) + + # TODO(Patrick): This if-else version statement should be removed once + # https://github.com/huggingface/datasets/issues/4889 is resolved + if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.12.0"): + expected_end_tensor = torch.tensor([1.54, 1.88, 2.14, 2.46, 2.9, 3.16, 3.54, 3.72, 4.02, 4.18, 4.76, 5.16, 5.56, 5.7, 5.86, 6.2, 6.38, 6.62, 6.94]) + else: + expected_end_tensor = torch.tensor([1.54, 1.88, 2.14, 2.46, 2.9, 3.18, 3.54, 3.72, 4.02, 4.18, 4.76, 5.16, 5.56, 5.7, 5.86, 6.2, 6.38, 6.62, 6.94]) # fmt: on + + self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01)) + self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01)) From 21f6f58721dd9154357576be6de54eefef1f1818 Mon Sep 17 00:00:00 2001 From: "Duong A. Nguyen" <38061659+duongna21@users.noreply.github.com> Date: Sat, 27 Aug 2022 02:04:18 +0700 Subject: [PATCH 148/539] Fix incomplete outputs of FlaxBert (#18772) * Fix incomplete FlaxBert outputs * fix big_bird electra roberta --- src/transformers/models/bert/modeling_flax_bert.py | 2 +- src/transformers/models/big_bird/modeling_flax_big_bird.py | 2 +- src/transformers/models/electra/modeling_flax_electra.py | 2 +- src/transformers/models/roberta/modeling_flax_roberta.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/transformers/models/bert/modeling_flax_bert.py b/src/transformers/models/bert/modeling_flax_bert.py index 8daa866be10561..0cdf622f33e8a3 100644 --- a/src/transformers/models/bert/modeling_flax_bert.py +++ b/src/transformers/models/bert/modeling_flax_bert.py @@ -612,7 +612,7 @@ def __call__( if output_hidden_states: all_hidden_states += (hidden_states,) - outputs = (hidden_states,) + outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions) if not return_dict: return tuple(v for v in outputs if v is not None) diff --git a/src/transformers/models/big_bird/modeling_flax_big_bird.py b/src/transformers/models/big_bird/modeling_flax_big_bird.py index 2e3192ff0eeb02..140beb64239e5e 100644 --- a/src/transformers/models/big_bird/modeling_flax_big_bird.py +++ b/src/transformers/models/big_bird/modeling_flax_big_bird.py @@ -1438,7 +1438,7 @@ def __call__( if output_hidden_states: all_hidden_states += (hidden_states,) - outputs = (hidden_states,) + outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions) if not return_dict: return tuple(v for v in outputs if v is not None) diff --git a/src/transformers/models/electra/modeling_flax_electra.py b/src/transformers/models/electra/modeling_flax_electra.py index 5f02c01a650e12..99f193f590ca2a 100644 --- a/src/transformers/models/electra/modeling_flax_electra.py +++ b/src/transformers/models/electra/modeling_flax_electra.py @@ -590,7 +590,7 @@ def __call__( if output_hidden_states: all_hidden_states += (hidden_states,) - outputs = (hidden_states,) + outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions) if not return_dict: return tuple(v for v in outputs if v is not None) diff --git a/src/transformers/models/roberta/modeling_flax_roberta.py b/src/transformers/models/roberta/modeling_flax_roberta.py index ddd6359b36be83..5cc3da84cc3c1d 100644 --- a/src/transformers/models/roberta/modeling_flax_roberta.py +++ b/src/transformers/models/roberta/modeling_flax_roberta.py @@ -580,7 +580,7 @@ def __call__( if output_hidden_states: all_hidden_states += (hidden_states,) - outputs = (hidden_states,) + outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions) if not return_dict: return tuple(v for v in outputs if v is not None) From f2fbe4475386bfcfb3b83d0a3223ba216a3c3a91 Mon Sep 17 00:00:00 2001 From: Philipp Schmid <32632186+philschmid@users.noreply.github.com> Date: Mon, 29 Aug 2022 04:32:19 +0200 Subject: [PATCH 149/539] Fix broken link DeepSpeed documentation link (#18783) * Fix broken link * Trigger CI Co-authored-by: Stas Bekman --- docs/source/en/main_classes/deepspeed.mdx | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/source/en/main_classes/deepspeed.mdx b/docs/source/en/main_classes/deepspeed.mdx index 11831dbdc401da..a0d6dcc7769e79 100644 --- a/docs/source/en/main_classes/deepspeed.mdx +++ b/docs/source/en/main_classes/deepspeed.mdx @@ -37,7 +37,7 @@ won't be possible on a single GPU. 2. If you don't use [`Trainer`] and want to use your own Trainer where you integrated DeepSpeed yourself, core functionality functions like `from_pretrained` and `from_config` include integration of essential parts of DeepSpeed like `zero.Init` for ZeRO stage 3 and higher. To tap into this feature read the docs on - [deepspeed-non-trainer-integration](#deepspeed-non-trainer-integration). + [non-Trainer DeepSpeed Integration](#nontrainer-deepspeed-integration). What is integrated: @@ -1849,7 +1849,6 @@ In this case you usually need to raise the value of `initial_scale_power`. Setti - ## Non-Trainer Deepspeed Integration From 5f06a09b9f3f05b4860f11bbbe22861923b49d81 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 29 Aug 2022 09:10:13 +0200 Subject: [PATCH 150/539] fix missing block when there is no failure (#18775) Co-authored-by: ydshieh --- utils/notification_service_doc_tests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/notification_service_doc_tests.py b/utils/notification_service_doc_tests.py index d02b08b605e116..7d5605c1cae3b0 100644 --- a/utils/notification_service_doc_tests.py +++ b/utils/notification_service_doc_tests.py @@ -167,7 +167,7 @@ def payload(self) -> str: if self.n_failures > 0: blocks.extend([self.category_failures]) - if self.no_failures == 0: + if self.n_failures == 0: blocks.append(self.no_failures) return json.dumps(blocks) From b10a3b3760eac3c3d201bfcad1d9ee1e3b26f3c0 Mon Sep 17 00:00:00 2001 From: fatih <34196005+fcakyon@users.noreply.github.com> Date: Mon, 29 Aug 2022 12:24:53 +0300 Subject: [PATCH 151/539] fix a possible typo in auto feature extraction (#18779) --- src/transformers/models/auto/feature_extraction_auto.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/models/auto/feature_extraction_auto.py b/src/transformers/models/auto/feature_extraction_auto.py index 5c5f86d040c8fe..3058aaa4334a20 100644 --- a/src/transformers/models/auto/feature_extraction_auto.py +++ b/src/transformers/models/auto/feature_extraction_auto.py @@ -69,7 +69,7 @@ ("swin", "ViTFeatureExtractor"), ("swinv2", "ViTFeatureExtractor"), ("van", "ConvNextFeatureExtractor"), - ("videomae", "ViTFeatureExtractor"), + ("videomae", "VideoMAEFeatureExtractor"), ("vilt", "ViltFeatureExtractor"), ("vit", "ViTFeatureExtractor"), ("vit_mae", "ViTFeatureExtractor"), From 8b67f20935e48b26c5803cf31e0e89b9cfaa22ab Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 29 Aug 2022 11:43:20 +0200 Subject: [PATCH 152/539] Fix memory leak issue in `torch_fx` tests (#18547) Co-authored-by: Lysandre Debut Co-authored-by: ydshieh --- tests/test_modeling_common.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index 8f80d7fa42f791..5a2888a12aafab 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -859,6 +859,10 @@ def flatten_output(output): f"serialized model {i}th output doesn't match model {i}th output for {model_class}", ) + # Avoid memory leak. Without this, each call increase RAM usage by ~20MB. + # (Even with this call, there are still memory leak by ~0.04MB) + self.clear_torch_jit_class_registry() + def test_headmasking(self): if not self.test_head_masking: return From 169b8cde471ec48e557f4c2d0c8e0d40abb13040 Mon Sep 17 00:00:00 2001 From: Lucain Date: Mon, 29 Aug 2022 15:56:08 +0200 Subject: [PATCH 153/539] Fix mock in `test_cached_files_are_used_when_internet_is_down` (#18804) --- tests/test_configuration_common.py | 1 + tests/test_feature_extraction_common.py | 1 + tests/test_modeling_common.py | 1 + tests/test_modeling_tf_common.py | 1 + tests/test_tokenization_common.py | 1 + 5 files changed, 5 insertions(+) diff --git a/tests/test_configuration_common.py b/tests/test_configuration_common.py index 5447fb6afb70eb..a7283b5f31232d 100644 --- a/tests/test_configuration_common.py +++ b/tests/test_configuration_common.py @@ -349,6 +349,7 @@ def test_cached_files_are_used_when_internet_is_down(self): response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError + response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") diff --git a/tests/test_feature_extraction_common.py b/tests/test_feature_extraction_common.py index 3ecf89a908672f..61bd85e8922107 100644 --- a/tests/test_feature_extraction_common.py +++ b/tests/test_feature_extraction_common.py @@ -172,6 +172,7 @@ def test_cached_files_are_used_when_internet_is_down(self): response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError + response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2") diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index 5a2888a12aafab..05921334a6b8bb 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -2931,6 +2931,7 @@ def test_cached_files_are_used_when_internet_is_down(self): response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError + response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") diff --git a/tests/test_modeling_tf_common.py b/tests/test_modeling_tf_common.py index abf26af2b65116..3372e96454d7c2 100644 --- a/tests/test_modeling_tf_common.py +++ b/tests/test_modeling_tf_common.py @@ -1924,6 +1924,7 @@ def test_cached_files_are_used_when_internet_is_down(self): response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError + response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert") diff --git a/tests/test_tokenization_common.py b/tests/test_tokenization_common.py index e1522ba3c66051..bdb7b6ce673896 100644 --- a/tests/test_tokenization_common.py +++ b/tests/test_tokenization_common.py @@ -3875,6 +3875,7 @@ def test_cached_files_are_used_when_internet_is_down(self): response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError + response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") From f1fd4606948af09125d2b5d4c1a7eba678e9700a Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Mon, 29 Aug 2022 18:46:07 +0200 Subject: [PATCH 154/539] Add SegFormer and ViLT links (#18808) Co-authored-by: Niels Rogge --- README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 30bc6d870bbf01..5f89bacf6415d2 100644 --- a/README.md +++ b/README.md @@ -87,12 +87,16 @@ Here are a few examples: In Computer Vision: - [Image classification with ViT](https://huggingface.co/google/vit-base-patch16-224) - [Object Detection with DETR](https://huggingface.co/facebook/detr-resnet-50) -- [Image Segmentation with DETR](https://huggingface.co/facebook/detr-resnet-50-panoptic) +- [Semantic Segmentation with SegFormer](https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512) +- [Panoptic Segmentation with DETR](https://huggingface.co/facebook/detr-resnet-50-panoptic) In Audio: - [Automatic Speech Recognition with Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base-960h) - [Keyword Spotting with Wav2Vec2](https://huggingface.co/superb/wav2vec2-base-superb-ks) +In Multimodal tasks: +- [Visual Question Answering with ViLT](https://huggingface.co/dandelin/vilt-b32-finetuned-vqa) + **[Write With Transformer](https://transformer.huggingface.co)**, built by the Hugging Face team, is the official demo of this repo’s text generation capabilities. ## If you are looking for custom support from the Hugging Face team From da5bb2921907c398e61ea1b73fd22d13938fc427 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 29 Aug 2022 18:46:30 +0200 Subject: [PATCH 155/539] send model to the correct device (#18800) Co-authored-by: ydshieh --- .../test_modeling_speech_encoder_decoder.py | 1 + .../test_modeling_vision_encoder_decoder.py | 1 + 2 files changed, 2 insertions(+) diff --git a/tests/models/speech_encoder_decoder/test_modeling_speech_encoder_decoder.py b/tests/models/speech_encoder_decoder/test_modeling_speech_encoder_decoder.py index f2415013026c72..3ecca17324a34e 100644 --- a/tests/models/speech_encoder_decoder/test_modeling_speech_encoder_decoder.py +++ b/tests/models/speech_encoder_decoder/test_modeling_speech_encoder_decoder.py @@ -403,6 +403,7 @@ def test_training_gradient_checkpointing(self): ) model = SpeechEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) + model.to(torch_device) model.train() model.gradient_checkpointing_enable() model.config.decoder_start_token_id = 0 diff --git a/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py b/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py index fbac8b898a6ec9..279614371bf881 100644 --- a/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py +++ b/tests/models/vision_encoder_decoder/test_modeling_vision_encoder_decoder.py @@ -331,6 +331,7 @@ def test_training_gradient_checkpointing(self): ) model = VisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model) + model.to(torch_device) model.train() model.gradient_checkpointing_enable() model.config.decoder_start_token_id = 0 From 8c4a11493f2c7a9153d1711150a8830b96eacd2e Mon Sep 17 00:00:00 2001 From: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Date: Mon, 29 Aug 2022 17:48:24 +0100 Subject: [PATCH 156/539] Revert to and safely handle flag in owlvit config (#18750) --- src/transformers/image_utils.py | 8 ++++---- .../models/owlvit/feature_extraction_owlvit.py | 7 +++++++ 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/src/transformers/image_utils.py b/src/transformers/image_utils.py index 120d7b3c1bd26c..437e7c5685586b 100644 --- a/src/transformers/image_utils.py +++ b/src/transformers/image_utils.py @@ -131,7 +131,7 @@ def convert_rgb(self, image): return image.convert("RGB") - def rescale_image(self, image: np.ndarray, scale: Union[float, int]) -> np.ndarray: + def rescale(self, image: np.ndarray, scale: Union[float, int]) -> np.ndarray: """ Rescale a numpy image by scale amount """ @@ -163,7 +163,7 @@ def to_numpy_array(self, image, rescale=None, channel_first=True): rescale = isinstance(image.flat[0], np.integer) if rescale is None else rescale if rescale: - image = self.rescale_image(image.astype(np.float32), 1 / 255.0) + image = self.rescale(image.astype(np.float32), 1 / 255.0) if channel_first and image.ndim == 3: image = image.transpose(2, 0, 1) @@ -214,9 +214,9 @@ def normalize(self, image, mean, std, rescale=False): # type it may need rescaling. elif rescale: if isinstance(image, np.ndarray): - image = self.rescale_image(image.astype(np.float32), 1 / 255.0) + image = self.rescale(image.astype(np.float32), 1 / 255.0) elif is_torch_tensor(image): - image = self.rescale_image(image.float(), 1 / 255.0) + image = self.rescale(image.float(), 1 / 255.0) if isinstance(image, np.ndarray): if not isinstance(mean, np.ndarray): diff --git a/src/transformers/models/owlvit/feature_extraction_owlvit.py b/src/transformers/models/owlvit/feature_extraction_owlvit.py index f8a45706835d8f..0af33eccaef044 100644 --- a/src/transformers/models/owlvit/feature_extraction_owlvit.py +++ b/src/transformers/models/owlvit/feature_extraction_owlvit.py @@ -85,6 +85,13 @@ def __init__( image_std=None, **kwargs ): + # Early versions of the OWL-ViT config on the hub had "rescale" as a flag. This clashes with the + # vision feature extractor method `rescale` as it would be set as an attribute during the super().__init__ + # call. This is for backwards compatibility. + if "rescale" in kwargs: + rescale_val = kwargs.pop("rescale") + kwargs["do_rescale"] = rescale_val + super().__init__(**kwargs) self.size = size self.resample = resample From da02b4035c3cd972ce6dd67de47a88434a105550 Mon Sep 17 00:00:00 2001 From: Ekagra Ranjan Date: Tue, 30 Aug 2022 15:49:03 +0530 Subject: [PATCH 157/539] Add docstring for BartForCausalLM (#18795) * add docstring for BartForCausalLM * doc-style fic --- src/transformers/models/bart/modeling_bart.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/transformers/models/bart/modeling_bart.py b/src/transformers/models/bart/modeling_bart.py index 525da6f34b06cf..372e249a3094c2 100755 --- a/src/transformers/models/bart/modeling_bart.py +++ b/src/transformers/models/bart/modeling_bart.py @@ -1702,6 +1702,12 @@ def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) +@add_start_docstrings( + """ + BART decoder with with a language modeling head on top (linear layer with weights tied to the input embeddings). + """, + BART_START_DOCSTRING, +) class BartForCausalLM(BartPretrainedModel): def __init__(self, config): config = copy.deepcopy(config) From 5c702175eb0eb16c62ae6e8dfc60a8f71f8b3697 Mon Sep 17 00:00:00 2001 From: "Li-Huai (Allan) Lin" Date: Tue, 30 Aug 2022 18:30:46 +0800 Subject: [PATCH 158/539] up (#18805) --- src/transformers/models/luke/modeling_luke.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/models/luke/modeling_luke.py b/src/transformers/models/luke/modeling_luke.py index 6d40dfafe8e477..befeaccd55f6ee 100644 --- a/src/transformers/models/luke/modeling_luke.py +++ b/src/transformers/models/luke/modeling_luke.py @@ -226,7 +226,7 @@ class EntitySpanClassificationOutput(ModelOutput): Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Classification loss. - logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): + logits (`torch.FloatTensor` of shape `(batch_size, entity_length, config.num_labels)`): Classification scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of From 7320d95d98f51ba056cbfe58f08146420c7ec8af Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Tue, 30 Aug 2022 12:31:34 +0200 Subject: [PATCH 159/539] [Swin, Swinv2] Fix attn_mask dtype (#18803) * Add dtype * Fix Swinv2 as well Co-authored-by: Niels Rogge --- src/transformers/models/donut/modeling_donut_swin.py | 6 +++--- src/transformers/models/swin/modeling_swin.py | 6 +++--- src/transformers/models/swinv2/modeling_swinv2.py | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/transformers/models/donut/modeling_donut_swin.py b/src/transformers/models/donut/modeling_donut_swin.py index 78e5cc81c19885..4f23d979361501 100644 --- a/src/transformers/models/donut/modeling_donut_swin.py +++ b/src/transformers/models/donut/modeling_donut_swin.py @@ -538,10 +538,10 @@ def set_shift_and_window_size(self, input_resolution): self.shift_size = 0 self.window_size = min(input_resolution) - def get_attn_mask(self, height, width): + def get_attn_mask(self, height, width, dtype): if self.shift_size > 0: # calculate attention mask for SW-MSA - img_mask = torch.zeros((1, height, width, 1)) + img_mask = torch.zeros((1, height, width, 1), dtype=dtype) height_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), @@ -600,7 +600,7 @@ def forward( # partition windows hidden_states_windows = window_partition(shifted_hidden_states, self.window_size) hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels) - attn_mask = self.get_attn_mask(height_pad, width_pad) + attn_mask = self.get_attn_mask(height_pad, width_pad, dtype=hidden_states.dtype) if attn_mask is not None: attn_mask = attn_mask.to(hidden_states_windows.device) diff --git a/src/transformers/models/swin/modeling_swin.py b/src/transformers/models/swin/modeling_swin.py index 48c9b8cccf9ec0..58d01d1cdfd45d 100644 --- a/src/transformers/models/swin/modeling_swin.py +++ b/src/transformers/models/swin/modeling_swin.py @@ -604,10 +604,10 @@ def set_shift_and_window_size(self, input_resolution): self.shift_size = 0 self.window_size = min(input_resolution) - def get_attn_mask(self, height, width): + def get_attn_mask(self, height, width, dtype): if self.shift_size > 0: # calculate attention mask for SW-MSA - img_mask = torch.zeros((1, height, width, 1)) + img_mask = torch.zeros((1, height, width, 1), dtype=dtype) height_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), @@ -666,7 +666,7 @@ def forward( # partition windows hidden_states_windows = window_partition(shifted_hidden_states, self.window_size) hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels) - attn_mask = self.get_attn_mask(height_pad, width_pad) + attn_mask = self.get_attn_mask(height_pad, width_pad, dtype=hidden_states.dtype) if attn_mask is not None: attn_mask = attn_mask.to(hidden_states_windows.device) diff --git a/src/transformers/models/swinv2/modeling_swinv2.py b/src/transformers/models/swinv2/modeling_swinv2.py index 52f836d5b91d38..890530691dd3a6 100644 --- a/src/transformers/models/swinv2/modeling_swinv2.py +++ b/src/transformers/models/swinv2/modeling_swinv2.py @@ -676,10 +676,10 @@ def set_shift_and_window_size(self, input_resolution): else target_shift_size[0] ) - def get_attn_mask(self, height, width): + def get_attn_mask(self, height, width, dtype): if self.shift_size > 0: # calculate attention mask for shifted window multihead self attention - img_mask = torch.zeros((1, height, width, 1)) + img_mask = torch.zeros((1, height, width, 1), dtype=dtype) height_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), @@ -736,7 +736,7 @@ def forward( # partition windows hidden_states_windows = window_partition(shifted_hidden_states, self.window_size) hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels) - attn_mask = self.get_attn_mask(height_pad, width_pad) + attn_mask = self.get_attn_mask(height_pad, width_pad, dtype=hidden_states.dtype) if attn_mask is not None: attn_mask = attn_mask.to(hidden_states_windows.device) From de8548ebf3242305d0f9792dacb6f86b196a3a33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoffer=20Koo=20=C3=98hrstr=C3=B8m?= <2357447+ChrisFugl@users.noreply.github.com> Date: Tue, 30 Aug 2022 12:48:11 +0200 Subject: [PATCH 160/539] [LayoutLMv3] Add TensorFlow implementation (#18678) Co-authored-by: Esben Toke Christensen Co-authored-by: Lasse Reedtz Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Co-authored-by: Joao Gante --- docs/source/en/index.mdx | 4 +- docs/source/en/model_doc/layoutlmv3.mdx | 28 +- docs/source/it/index.mdx | 4 +- src/transformers/__init__.py | 18 + .../models/auto/modeling_tf_auto.py | 6 +- .../models/layoutlmv3/__init__.py | 31 + .../layoutlmv3/modeling_tf_layoutlmv3.py | 1610 +++++++++++++++++ src/transformers/utils/dummy_tf_objects.py | 38 + .../layoutlmv3/test_modeling_tf_layoutlmv3.py | 497 +++++ utils/documentation_tests.txt | 1 + 10 files changed, 2227 insertions(+), 10 deletions(-) create mode 100644 src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py create mode 100644 tests/models/layoutlmv3/test_modeling_tf_layoutlmv3.py diff --git a/docs/source/en/index.mdx b/docs/source/en/index.mdx index 17c04376780afe..ed04cad3dd9bf0 100644 --- a/docs/source/en/index.mdx +++ b/docs/source/en/index.mdx @@ -38,7 +38,7 @@ The documentation is organized in five parts: - **GET STARTED** contains a quick tour and installation instructions to get up and running with 🤗 Transformers. - **TUTORIALS** are a great place to begin if you are new to our library. This section will help you gain the basic skills you need to start using 🤗 Transformers. - **HOW-TO GUIDES** will show you how to achieve a specific goal like fine-tuning a pretrained model for language modeling or how to create a custom model head. -- **CONCEPTUAL GUIDES** provides more discussion and explanation of the underlying concepts and ideas behind models, tasks, and the design philosophy of 🤗 Transformers. +- **CONCEPTUAL GUIDES** provides more discussion and explanation of the underlying concepts and ideas behind models, tasks, and the design philosophy of 🤗 Transformers. - **API** describes each class and function, grouped in: - **MAIN CLASSES** for the main classes exposing the important APIs of the library. @@ -245,7 +245,7 @@ Flax), PyTorch, and/or TensorFlow. | ImageGPT | ❌ | ❌ | ✅ | ❌ | ❌ | | LayoutLM | ✅ | ✅ | ✅ | ✅ | ❌ | | LayoutLMv2 | ✅ | ✅ | ✅ | ❌ | ❌ | -| LayoutLMv3 | ✅ | ✅ | ✅ | ❌ | ❌ | +| LayoutLMv3 | ✅ | ✅ | ✅ | ✅ | ❌ | | LED | ✅ | ✅ | ✅ | ✅ | ❌ | | LeViT | ❌ | ❌ | ✅ | ❌ | ❌ | | Longformer | ✅ | ✅ | ✅ | ✅ | ❌ | diff --git a/docs/source/en/model_doc/layoutlmv3.mdx b/docs/source/en/model_doc/layoutlmv3.mdx index 8f115cf96ea56e..37fb0dc30446af 100644 --- a/docs/source/en/model_doc/layoutlmv3.mdx +++ b/docs/source/en/model_doc/layoutlmv3.mdx @@ -26,18 +26,18 @@ Tips: - In terms of data processing, LayoutLMv3 is identical to its predecessor [LayoutLMv2](layoutlmv2), except that: - images need to be resized and normalized with channels in regular RGB format. LayoutLMv2 on the other hand normalizes the images internally and expects the channels in BGR format. - - text is tokenized using byte-pair encoding (BPE), as opposed to WordPiece. + - text is tokenized using byte-pair encoding (BPE), as opposed to WordPiece. Due to these differences in data preprocessing, one can use [`LayoutLMv3Processor`] which internally combines a [`LayoutLMv3FeatureExtractor`] (for the image modality) and a [`LayoutLMv3Tokenizer`]/[`LayoutLMv3TokenizerFast`] (for the text modality) to prepare all data for the model. -- Regarding usage of [`LayoutLMv3Processor`], we refer to the [usage guide](layoutlmv2#usage-layoutlmv2processor) of its predecessor. +- Regarding usage of [`LayoutLMv3Processor`], we refer to the [usage guide](layoutlmv2#usage-layoutlmv2processor) of its predecessor. - Demo notebooks for LayoutLMv3 can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LayoutLMv3). - Demo scripts can be found [here](https://github.com/huggingface/transformers/tree/main/examples/research_projects/layoutlmv3). +alt="drawing" width="600"/> LayoutLMv3 architecture. Taken from the original paper. -This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/microsoft/unilm/tree/master/layoutlmv3). +This model was contributed by [nielsr](https://huggingface.co/nielsr). The TensorFlow version of this model was added by [chriskoo](https://huggingface.co/chriskoo), [tokec](https://huggingface.co/tokec), and [lre](https://huggingface.co/lre). The original code can be found [here](https://github.com/microsoft/unilm/tree/master/layoutlmv3). ## LayoutLMv3Config @@ -84,3 +84,23 @@ This model was contributed by [nielsr](https://huggingface.co/nielsr). The origi [[autodoc]] LayoutLMv3ForQuestionAnswering - forward + +## TFLayoutLMv3Model + +[[autodoc]] TFLayoutLMv3Model + - call + +## TFLayoutLMv3ForSequenceClassification + +[[autodoc]] TFLayoutLMv3ForSequenceClassification + - call + +## TFLayoutLMv3ForTokenClassification + +[[autodoc]] TFLayoutLMv3ForTokenClassification + - call + +## TFLayoutLMv3ForQuestionAnswering + +[[autodoc]] TFLayoutLMv3ForQuestionAnswering + - call diff --git a/docs/source/it/index.mdx b/docs/source/it/index.mdx index d5e10b7c4983cb..3ee8da15ed2d03 100644 --- a/docs/source/it/index.mdx +++ b/docs/source/it/index.mdx @@ -221,7 +221,7 @@ tokenizer (chiamato "slow"). Un tokenizer "fast" supportato dalla libreria 🤗 | ImageGPT | ❌ | ❌ | ✅ | ❌ | ❌ | | LayoutLM | ✅ | ✅ | ✅ | ✅ | ❌ | | LayoutLMv2 | ✅ | ✅ | ✅ | ❌ | ❌ | -| LayoutLMv3 | ✅ | ✅ | ✅ | ❌ | ❌ | +| LayoutLMv3 | ✅ | ✅ | ✅ | ✅ | ❌ | | LED | ✅ | ✅ | ✅ | ✅ | ❌ | | Longformer | ✅ | ✅ | ✅ | ✅ | ❌ | | LUKE | ✅ | ❌ | ✅ | ❌ | ❌ | @@ -288,4 +288,4 @@ tokenizer (chiamato "slow"). Un tokenizer "fast" supportato dalla libreria 🤗 | YOLOS | ❌ | ❌ | ✅ | ❌ | ❌ | | YOSO | ❌ | ❌ | ✅ | ❌ | ❌ | - \ No newline at end of file + diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 3281d266a2f3ce..bb64fe9295dad7 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -2343,6 +2343,16 @@ "TFLayoutLMPreTrainedModel", ] ) + _import_structure["models.layoutlmv3"].extend( + [ + "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFLayoutLMv3ForQuestionAnswering", + "TFLayoutLMv3ForSequenceClassification", + "TFLayoutLMv3ForTokenClassification", + "TFLayoutLMv3Model", + "TFLayoutLMv3PreTrainedModel", + ] + ) _import_structure["models.led"].extend(["TFLEDForConditionalGeneration", "TFLEDModel", "TFLEDPreTrainedModel"]) _import_structure["models.longformer"].extend( [ @@ -4801,6 +4811,14 @@ TFHubertModel, TFHubertPreTrainedModel, ) + from .models.layoutlmv3 import ( + TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, + TFLayoutLMv3ForQuestionAnswering, + TFLayoutLMv3ForSequenceClassification, + TFLayoutLMv3ForTokenClassification, + TFLayoutLMv3Model, + TFLayoutLMv3PreTrainedModel, + ) from .models.led import TFLEDForConditionalGeneration, TFLEDModel, TFLEDPreTrainedModel from .models.longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, diff --git a/src/transformers/models/auto/modeling_tf_auto.py b/src/transformers/models/auto/modeling_tf_auto.py index 359e1f05c47b55..991bb79a6b3719 100644 --- a/src/transformers/models/auto/modeling_tf_auto.py +++ b/src/transformers/models/auto/modeling_tf_auto.py @@ -52,6 +52,7 @@ ("gptj", "TFGPTJModel"), ("hubert", "TFHubertModel"), ("layoutlm", "TFLayoutLMModel"), + ("layoutlmv3", "TFLayoutLMv3Model"), ("led", "TFLEDModel"), ("longformer", "TFLongformerModel"), ("lxmert", "TFLxmertModel"), @@ -268,6 +269,7 @@ ("gpt2", "TFGPT2ForSequenceClassification"), ("gptj", "TFGPTJForSequenceClassification"), ("layoutlm", "TFLayoutLMForSequenceClassification"), + ("layoutlmv3", "TFLayoutLMv3ForSequenceClassification"), ("longformer", "TFLongformerForSequenceClassification"), ("mobilebert", "TFMobileBertForSequenceClassification"), ("mpnet", "TFMPNetForSequenceClassification"), @@ -297,6 +299,7 @@ ("flaubert", "TFFlaubertForQuestionAnsweringSimple"), ("funnel", "TFFunnelForQuestionAnswering"), ("gptj", "TFGPTJForQuestionAnswering"), + ("layoutlmv3", "TFLayoutLMv3ForQuestionAnswering"), ("longformer", "TFLongformerForQuestionAnswering"), ("mobilebert", "TFMobileBertForQuestionAnswering"), ("mpnet", "TFMPNetForQuestionAnswering"), @@ -316,7 +319,6 @@ ] ) - TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict( [ # Model for Token Classification mapping @@ -331,6 +333,7 @@ ("flaubert", "TFFlaubertForTokenClassification"), ("funnel", "TFFunnelForTokenClassification"), ("layoutlm", "TFLayoutLMForTokenClassification"), + ("layoutlmv3", "TFLayoutLMv3ForTokenClassification"), ("longformer", "TFLongformerForTokenClassification"), ("mobilebert", "TFMobileBertForTokenClassification"), ("mpnet", "TFMPNetForTokenClassification"), @@ -373,7 +376,6 @@ ] ) - TF_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_MAPPING_NAMES) TF_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_PRETRAINING_MAPPING_NAMES) TF_MODEL_WITH_LM_HEAD_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_WITH_LM_HEAD_MAPPING_NAMES) diff --git a/src/transformers/models/layoutlmv3/__init__.py b/src/transformers/models/layoutlmv3/__init__.py index cfa26057e87b36..68a07362dc412e 100644 --- a/src/transformers/models/layoutlmv3/__init__.py +++ b/src/transformers/models/layoutlmv3/__init__.py @@ -21,6 +21,7 @@ from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, + is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, @@ -60,6 +61,21 @@ "LayoutLMv3PreTrainedModel", ] +try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_tf_layoutlmv3"] = [ + "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFLayoutLMv3ForQuestionAnswering", + "TFLayoutLMv3ForSequenceClassification", + "TFLayoutLMv3ForTokenClassification", + "TFLayoutLMv3Model", + "TFLayoutLMv3PreTrainedModel", + ] + try: if not is_vision_available(): raise OptionalDependencyNotAvailable() @@ -101,6 +117,21 @@ LayoutLMv3PreTrainedModel, ) + try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_tf_layoutlmv3 import ( + TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, + TFLayoutLMv3ForQuestionAnswering, + TFLayoutLMv3ForSequenceClassification, + TFLayoutLMv3ForTokenClassification, + TFLayoutLMv3Model, + TFLayoutLMv3PreTrainedModel, + ) + try: if not is_vision_available(): raise OptionalDependencyNotAvailable() diff --git a/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py b/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py new file mode 100644 index 00000000000000..85a44e4ff52ae8 --- /dev/null +++ b/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py @@ -0,0 +1,1610 @@ +# coding=utf-8 +# Copyright 2022 Microsoft Research and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""TF 2.0 LayoutLMv3 model.""" + +import collections +import math +from typing import Dict, List, Optional, Tuple, Union + +import tensorflow as tf + +from ...activations_tf import get_tf_activation +from ...modeling_tf_outputs import ( + TFBaseModelOutput, + TFQuestionAnsweringModelOutput, + TFSequenceClassifierOutput, + TFTokenClassifierOutput, +) +from ...modeling_tf_utils import ( + TFPreTrainedModel, + TFQuestionAnsweringLoss, + TFSequenceClassificationLoss, + TFTokenClassificationLoss, + get_initializer, + keras_serializable, + unpack_inputs, +) +from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings +from .configuration_layoutlmv3 import LayoutLMv3Config + + +_CONFIG_FOR_DOC = "LayoutLMv3Config" + +_DUMMY_INPUT_IDS = [ + [7, 6, 1], + [1, 2, 0], +] + +_DUMMY_BBOX = [ + [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], + [[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]], +] + +TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "microsoft/layoutlmv3-base", + "microsoft/layoutlmv3-large", + # See all LayoutLMv3 models at https://huggingface.co/models?filter=layoutlmv3 +] + +LARGE_NEGATIVE = -1e8 + + +class TFLayoutLMv3PatchEmbeddings(tf.keras.layers.Layer): + """LayoutLMv3 image (patch) embeddings.""" + + def __init__(self, config: LayoutLMv3Config, **kwargs): + super().__init__(**kwargs) + patch_sizes = ( + config.patch_size + if isinstance(config.patch_size, collections.abc.Iterable) + else (config.patch_size, config.patch_size) + ) + self.proj = tf.keras.layers.Conv2D( + filters=config.hidden_size, + kernel_size=patch_sizes, + strides=patch_sizes, + padding="valid", + data_format="channels_last", + use_bias=True, + kernel_initializer=get_initializer(config.initializer_range), + name="proj", + ) + self.hidden_size = config.hidden_size + self.num_patches = (config.input_size**2) // (patch_sizes[0] * patch_sizes[1]) + + def call(self, pixel_values: tf.Tensor) -> tf.Tensor: + # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. + # So change the input format from `NCHW` to `NHWC`. + pixel_values = tf.transpose(pixel_values, perm=[0, 2, 3, 1]) + + embeddings = self.proj(pixel_values) + embeddings = tf.reshape(embeddings, (-1, self.num_patches, self.hidden_size)) + return embeddings + + +class TFLayoutLMv3TextEmbeddings(tf.keras.layers.Layer): + """ + LayoutLMv3 text embeddings. Same as `RobertaEmbeddings` but with added spatial (layout) embeddings. + """ + + def __init__(self, config: LayoutLMv3Config, **kwargs): + super().__init__(**kwargs) + self.word_embeddings = tf.keras.layers.Embedding( + config.vocab_size, + config.hidden_size, + embeddings_initializer=get_initializer(config.initializer_range), + name="word_embeddings", + ) + self.token_type_embeddings = tf.keras.layers.Embedding( + config.type_vocab_size, + config.hidden_size, + embeddings_initializer=get_initializer(config.initializer_range), + name="token_type_embeddings", + ) + self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") + self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) + self.padding_token_index = config.pad_token_id + self.position_embeddings = tf.keras.layers.Embedding( + config.max_position_embeddings, + config.hidden_size, + embeddings_initializer=get_initializer(config.initializer_range), + name="position_embeddings", + ) + self.x_position_embeddings = tf.keras.layers.Embedding( + config.max_2d_position_embeddings, + config.coordinate_size, + embeddings_initializer=get_initializer(config.initializer_range), + name="x_position_embeddings", + ) + self.y_position_embeddings = tf.keras.layers.Embedding( + config.max_2d_position_embeddings, + config.coordinate_size, + embeddings_initializer=get_initializer(config.initializer_range), + name="y_position_embeddings", + ) + self.h_position_embeddings = tf.keras.layers.Embedding( + config.max_2d_position_embeddings, + config.shape_size, + embeddings_initializer=get_initializer(config.initializer_range), + name="h_position_embeddings", + ) + self.w_position_embeddings = tf.keras.layers.Embedding( + config.max_2d_position_embeddings, + config.shape_size, + embeddings_initializer=get_initializer(config.initializer_range), + name="w_position_embeddings", + ) + self.max_2d_positions = config.max_2d_position_embeddings + + def calculate_spatial_position_embeddings(self, bbox: tf.Tensor) -> tf.Tensor: + try: + left_position_ids = bbox[:, :, 0] + upper_position_ids = bbox[:, :, 1] + right_position_ids = bbox[:, :, 2] + lower_position_ids = bbox[:, :, 3] + except IndexError as exception: + raise IndexError("Bounding box is not of shape (batch_size, seq_length, 4).") from exception + + try: + left_position_embeddings = self.x_position_embeddings(left_position_ids) + upper_position_embeddings = self.y_position_embeddings(upper_position_ids) + right_position_embeddings = self.x_position_embeddings(right_position_ids) + lower_position_embeddings = self.y_position_embeddings(lower_position_ids) + except IndexError as exception: + raise IndexError( + f"The `bbox` coordinate values should be within 0-{self.max_2d_positions} range." + ) from exception + + max_position_id = self.max_2d_positions - 1 + h_position_embeddings = self.h_position_embeddings( + tf.clip_by_value(bbox[:, :, 3] - bbox[:, :, 1], 0, max_position_id) + ) + w_position_embeddings = self.w_position_embeddings( + tf.clip_by_value(bbox[:, :, 2] - bbox[:, :, 0], 0, max_position_id) + ) + + # LayoutLMv1 sums the spatial embeddings, but LayoutLMv3 concatenates them. + spatial_position_embeddings = tf.concat( + [ + left_position_embeddings, + upper_position_embeddings, + right_position_embeddings, + lower_position_embeddings, + h_position_embeddings, + w_position_embeddings, + ], + axis=-1, + ) + return spatial_position_embeddings + + def create_position_ids_from_inputs_embeds(self, inputs_embds: tf.Tensor) -> tf.Tensor: + """ + We are provided embeddings directly. We cannot infer which are padded, so just generate sequential position + ids. + """ + input_shape = tf.shape(inputs_embds) + sequence_length = input_shape[1] + start_index = self.padding_token_index + 1 + end_index = self.padding_token_index + sequence_length + 1 + position_ids = tf.range(start_index, end_index, dtype=tf.int32) + batch_size = input_shape[0] + position_ids = tf.reshape(position_ids, (1, sequence_length)) + position_ids = tf.tile(position_ids, (batch_size, 1)) + return position_ids + + def create_position_ids_from_input_ids(self, input_ids: tf.Tensor) -> tf.Tensor: + """ + Replace non-padding symbols with their position numbers. Position numbers begin at padding_token_index + 1. + """ + mask = tf.cast(tf.not_equal(input_ids, self.padding_token_index), input_ids.dtype) + position_ids = tf.cumsum(mask, axis=1) * mask + position_ids = position_ids + self.padding_token_index + return position_ids + + def create_position_ids(self, input_ids: tf.Tensor, inputs_embeds: tf.Tensor) -> tf.Tensor: + if input_ids is None: + return self.create_position_ids_from_inputs_embeds(inputs_embeds) + else: + return self.create_position_ids_from_input_ids(input_ids) + + def call( + self, + input_ids: Optional[tf.Tensor] = None, + bbox: tf.Tensor = None, + token_type_ids: Optional[tf.Tensor] = None, + position_ids: Optional[tf.Tensor] = None, + inputs_embeds: Optional[tf.Tensor] = None, + training: bool = False, + ) -> tf.Tensor: + if position_ids is None: + position_ids = self.create_position_ids(input_ids, inputs_embeds) + + if input_ids is not None: + input_shape = tf.shape(input_ids) + else: + input_shape = tf.shape(inputs_embeds)[:-1] + + if token_type_ids is None: + token_type_ids = tf.zeros(input_shape, dtype=position_ids.dtype) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = inputs_embeds + token_type_embeddings + position_embeddings = self.position_embeddings(position_ids) + embeddings += position_embeddings + + spatial_position_embeddings = self.calculate_spatial_position_embeddings(bbox) + + embeddings += spatial_position_embeddings + + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings, training=training) + return embeddings + + +class TFLayoutLMv3SelfAttention(tf.keras.layers.Layer): + def __init__(self, config: LayoutLMv3Config, **kwargs): + super().__init__(**kwargs) + if config.hidden_size % config.num_attention_heads != 0: + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads})" + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + self.attention_score_normaliser = math.sqrt(self.attention_head_size) + + self.query = tf.keras.layers.Dense( + self.all_head_size, + kernel_initializer=get_initializer(config.initializer_range), + name="query", + ) + self.key = tf.keras.layers.Dense( + self.all_head_size, + kernel_initializer=get_initializer(config.initializer_range), + name="key", + ) + self.value = tf.keras.layers.Dense( + self.all_head_size, + kernel_initializer=get_initializer(config.initializer_range), + name="value", + ) + + self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob) + self.has_relative_attention_bias = config.has_relative_attention_bias + self.has_spatial_attention_bias = config.has_spatial_attention_bias + + def transpose_for_scores(self, x: tf.Tensor): + shape = tf.shape(x) + new_shape = ( + shape[0], # batch_size + shape[1], # seq_length + self.num_attention_heads, + self.attention_head_size, + ) + x = tf.reshape(x, new_shape) + return tf.transpose(x, perm=[0, 2, 1, 3]) # batch_size, num_heads, seq_length, attention_head_size + + def cogview_attention(self, attention_scores: tf.Tensor, alpha: Union[float, int] = 32): + """ + https://arxiv.org/abs/2105.13290 Section 2.4 Stabilization of training: Precision Bottleneck Relaxation + (PB-Relax). A replacement of the original tf.keras.layers.Softmax(axis=-1)(attention_scores). Seems the new + attention_probs will result in a slower speed and a little bias. Can use + tf.debugging.assert_near(standard_attention_probs, cogview_attention_probs, atol=1e-08) for comparison. The + smaller atol (e.g., 1e-08), the better. + """ + scaled_attention_scores = attention_scores / alpha + max_value = tf.expand_dims(tf.reduce_max(scaled_attention_scores, axis=-1), axis=-1) + new_attention_scores = (scaled_attention_scores - max_value) * alpha + return tf.math.softmax(new_attention_scores, axis=-1) + + def call( + self, + hidden_states: tf.Tensor, + attention_mask: Optional[tf.Tensor], + head_mask: Optional[tf.Tensor], + output_attentions: bool, + rel_pos: Optional[tf.Tensor] = None, + rel_2d_pos: Optional[tf.Tensor] = None, + training: bool = False, + ) -> Union[Tuple[tf.Tensor], Tuple[tf.Tensor, tf.Tensor]]: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + query_layer = self.transpose_for_scores(self.query(hidden_states)) + + # Take the dot product between "query" and "key" to get the raw attention scores. + normalised_query_layer = query_layer / self.attention_score_normaliser + transposed_key_layer = tf.transpose( + key_layer, perm=[0, 1, 3, 2] + ) # batch_size, num_heads, attention_head_size, seq_length + attention_scores = tf.matmul(normalised_query_layer, transposed_key_layer) + + if self.has_relative_attention_bias and self.has_spatial_attention_bias: + attention_scores += (rel_pos + rel_2d_pos) / self.attention_score_normaliser + elif self.has_relative_attention_bias: + attention_scores += rel_pos / self.attention_score_normaliser + + if attention_mask is not None: + # Apply the attention mask (is precomputed for all layers in TFLayoutLMv3Model call() function) + attention_scores += attention_mask + + # Normalize the attention scores to probabilities. + # Use the trick of CogView paper to stabilize training. + attention_probs = self.cogview_attention(attention_scores) + + attention_probs = self.dropout(attention_probs, training=training) + + # Mask heads if we want to. + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = tf.matmul(attention_probs, value_layer) + context_layer = tf.transpose( + context_layer, perm=[0, 2, 1, 3] + ) # batch_size, seq_length, num_heads, attention_head_size + shape = tf.shape(context_layer) + context_layer = tf.reshape( + context_layer, (shape[0], shape[1], self.all_head_size) + ) # batch_size, seq_length, num_heads * attention_head_size + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + return outputs + + +# Copied from models.roberta.modeling_tf_roberta.TFRobertaSelfOutput +class TFLayoutLMv3SelfOutput(tf.keras.layers.Layer): + def __init__(self, config: LayoutLMv3Config, **kwargs): + super().__init__(**kwargs) + + self.dense = tf.keras.layers.Dense( + units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + ) + self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") + self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) + + def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: + hidden_states = self.dense(inputs=hidden_states) + hidden_states = self.dropout(inputs=hidden_states, training=training) + hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) + + return hidden_states + + +class TFLayoutLMv3Attention(tf.keras.layers.Layer): + def __init__(self, config: LayoutLMv3Config, **kwargs): + super().__init__(**kwargs) + self.self_attention = TFLayoutLMv3SelfAttention(config, name="self") + self.self_output = TFLayoutLMv3SelfOutput(config, name="output") + + def call( + self, + hidden_states: tf.Tensor, + attention_mask: Optional[tf.Tensor], + head_mask: Optional[tf.Tensor], + output_attentions: bool, + rel_pos: Optional[tf.Tensor] = None, + rel_2d_pos: Optional[tf.Tensor] = None, + training: bool = False, + ) -> Union[Tuple[tf.Tensor], Tuple[tf.Tensor, tf.Tensor]]: + self_outputs = self.self_attention( + hidden_states, + attention_mask, + head_mask, + output_attentions, + rel_pos, + rel_2d_pos, + training=training, + ) + attention_output = self.self_output(self_outputs[0], hidden_states, training=training) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +# Copied from models.roberta.modeling_tf_bert.TFRobertaIntermediate +class TFLayoutLMv3Intermediate(tf.keras.layers.Layer): + def __init__(self, config: LayoutLMv3Config, **kwargs): + super().__init__(**kwargs) + + self.dense = tf.keras.layers.Dense( + units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + ) + + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = get_tf_activation(config.hidden_act) + else: + self.intermediate_act_fn = config.hidden_act + + def call(self, hidden_states: tf.Tensor) -> tf.Tensor: + hidden_states = self.dense(inputs=hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + + return hidden_states + + +# Copied from models.roberta.modeling_tf_bert.TFRobertaOutput +class TFLayoutLMv3Output(tf.keras.layers.Layer): + def __init__(self, config: LayoutLMv3Config, **kwargs): + super().__init__(**kwargs) + + self.dense = tf.keras.layers.Dense( + units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + ) + self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") + self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) + + def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: + hidden_states = self.dense(inputs=hidden_states) + hidden_states = self.dropout(inputs=hidden_states, training=training) + hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) + + return hidden_states + + +class TFLayoutLMv3Layer(tf.keras.layers.Layer): + def __init__(self, config: LayoutLMv3Config, **kwargs): + super().__init__(**kwargs) + self.attention = TFLayoutLMv3Attention(config, name="attention") + self.intermediate = TFLayoutLMv3Intermediate(config, name="intermediate") + self.bert_output = TFLayoutLMv3Output(config, name="output") + + def call( + self, + hidden_states: tf.Tensor, + attention_mask: Optional[tf.Tensor], + head_mask: Optional[tf.Tensor], + output_attentions: bool, + rel_pos: Optional[tf.Tensor] = None, + rel_2d_pos: Optional[tf.Tensor] = None, + training: bool = False, + ) -> Union[Tuple[tf.Tensor], Tuple[tf.Tensor, tf.Tensor]]: + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + rel_pos=rel_pos, + rel_2d_pos=rel_2d_pos, + training=training, + ) + attention_output = self_attention_outputs[0] + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + intermediate_output = self.intermediate(attention_output) + layer_output = self.bert_output(intermediate_output, attention_output, training=training) + outputs = (layer_output,) + outputs + return outputs + + +class TFLayoutLMv3Encoder(tf.keras.layers.Layer): + def __init__(self, config: LayoutLMv3Config, **kwargs): + super().__init__(**kwargs) + self.config = config + self.layer = [TFLayoutLMv3Layer(config, name=f"layer.{i}") for i in range(config.num_hidden_layers)] + + self.has_relative_attention_bias = config.has_relative_attention_bias + self.has_spatial_attention_bias = config.has_spatial_attention_bias + + if self.has_relative_attention_bias: + self.rel_pos_bins = config.rel_pos_bins + self.max_rel_pos = config.max_rel_pos + self.rel_pos_bias = tf.keras.layers.Dense( + units=config.num_attention_heads, + kernel_initializer=get_initializer(config.initializer_range), + use_bias=False, + name="rel_pos_bias", + ) + + if self.has_spatial_attention_bias: + self.max_rel_2d_pos = config.max_rel_2d_pos + self.rel_2d_pos_bins = config.rel_2d_pos_bins + self.rel_pos_x_bias = tf.keras.layers.Dense( + units=config.num_attention_heads, + kernel_initializer=get_initializer(config.initializer_range), + use_bias=False, + name="rel_pos_x_bias", + ) + self.rel_pos_y_bias = tf.keras.layers.Dense( + units=config.num_attention_heads, + kernel_initializer=get_initializer(config.initializer_range), + use_bias=False, + name="rel_pos_y_bias", + ) + + def relative_position_bucket(self, relative_positions: tf.Tensor, num_buckets: int, max_distance: int): + # the negative relative positions are assigned to the interval [0, num_buckets / 2] + # we deal with this by assigning absolute relative positions to the interval [0, num_buckets / 2] + # and then offsetting the positive relative positions by num_buckets / 2 at the end + num_buckets = num_buckets // 2 + buckets = tf.abs(relative_positions) + + # half of the buckets are for exact increments in positions + max_exact_buckets = num_buckets // 2 + is_small = buckets < max_exact_buckets + + # the other half of the buckets are for logarithmically bigger bins in positions up to max_distance + buckets_log_ratio = tf.math.log(tf.cast(buckets, tf.float32) / max_exact_buckets) + distance_log_ratio = math.log(max_distance / max_exact_buckets) + buckets_big_offset = ( + buckets_log_ratio / distance_log_ratio * (num_buckets - max_exact_buckets) + ) # scale is [0, num_buckets - max_exact_buckets] + buckets_big = max_exact_buckets + buckets_big_offset # scale is [max_exact_buckets, num_buckets] + buckets_big = tf.cast(buckets_big, buckets.dtype) + buckets_big = tf.minimum(buckets_big, num_buckets - 1) + + return (tf.cast(relative_positions > 0, buckets.dtype) * num_buckets) + tf.where( + is_small, buckets, buckets_big + ) + + def _cal_pos_emb( + self, + dense_layer: tf.keras.layers.Dense, + position_ids: tf.Tensor, + num_buckets: int, + max_distance: int, + ): + rel_pos_matrix = tf.expand_dims(position_ids, axis=-2) - tf.expand_dims(position_ids, axis=-1) + rel_pos = self.relative_position_bucket(rel_pos_matrix, num_buckets, max_distance) + rel_pos_one_hot = tf.one_hot(rel_pos, depth=num_buckets, dtype=self.compute_dtype) + embedding = dense_layer(rel_pos_one_hot) + # batch_size, seq_length, seq_length, num_heads --> batch_size, num_heads, seq_length, seq_length + embedding = tf.transpose(embedding, [0, 3, 1, 2]) + embedding = tf.cast(embedding, dtype=self.compute_dtype) + return embedding + + def _cal_1d_pos_emb(self, position_ids: tf.Tensor): + return self._cal_pos_emb(self.rel_pos_bias, position_ids, self.rel_pos_bins, self.max_rel_pos) + + def _cal_2d_pos_emb(self, bbox: tf.Tensor): + position_coord_x = bbox[:, :, 0] # left + position_coord_y = bbox[:, :, 3] # bottom + rel_pos_x = self._cal_pos_emb( + self.rel_pos_x_bias, + position_coord_x, + self.rel_2d_pos_bins, + self.max_rel_2d_pos, + ) + rel_pos_y = self._cal_pos_emb( + self.rel_pos_y_bias, + position_coord_y, + self.rel_2d_pos_bins, + self.max_rel_2d_pos, + ) + rel_2d_pos = rel_pos_x + rel_pos_y + return rel_2d_pos + + def call( + self, + hidden_states: tf.Tensor, + bbox: Optional[tf.Tensor] = None, + attention_mask: Optional[tf.Tensor] = None, + head_mask: Optional[tf.Tensor] = None, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + position_ids: Optional[tf.Tensor] = None, + training: bool = False, + ) -> Union[ + TFBaseModelOutput, + Tuple[tf.Tensor], + Tuple[tf.Tensor, tf.Tensor], + Tuple[tf.Tensor, tf.Tensor, tf.Tensor], + ]: + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + + rel_pos = self._cal_1d_pos_emb(position_ids) if self.has_relative_attention_bias else None + rel_2d_pos = self._cal_2d_pos_emb(bbox) if self.has_spatial_attention_bias else None + + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + output_attentions, + rel_pos=rel_pos, + rel_2d_pos=rel_2d_pos, + training=training, + ) + + hidden_states = layer_outputs[0] + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if return_dict: + return TFBaseModelOutput( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + ) + else: + return tuple( + value for value in [hidden_states, all_hidden_states, all_self_attentions] if value is not None + ) + + +@keras_serializable +class TFLayoutLMv3MainLayer(tf.keras.layers.Layer): + config_class = LayoutLMv3Config + + def __init__(self, config: LayoutLMv3Config, **kwargs): + super().__init__(**kwargs) + + self.config = config + + if config.text_embed: + self.embeddings = TFLayoutLMv3TextEmbeddings(config, name="embeddings") + + if config.visual_embed: + self.patch_embed = TFLayoutLMv3PatchEmbeddings(config, name="patch_embed") + self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") + self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob, name="dropout") + + if config.has_relative_attention_bias or config.has_spatial_attention_bias: + image_size = config.input_size // config.patch_size + self.init_visual_bbox(image_size=(image_size, image_size)) + + self.norm = tf.keras.layers.LayerNormalization(epsilon=1e-6, name="norm") + + self.encoder = TFLayoutLMv3Encoder(config, name="encoder") + + def build(self, input_shape: tf.TensorShape): + if self.config.visual_embed: + image_size = self.config.input_size // self.config.patch_size + self.cls_token = self.add_weight( + shape=(1, 1, self.config.hidden_size), + initializer="zeros", + trainable=True, + dtype=tf.float32, + name="cls_token", + ) + self.pos_embed = self.add_weight( + shape=(1, image_size * image_size + 1, self.config.hidden_size), + initializer="zeros", + trainable=True, + dtype=tf.float32, + name="pos_embed", + ) + + super().build(input_shape) + + def get_input_embeddings(self) -> tf.keras.layers.Layer: + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value: tf.Variable): + self.embeddings.word_embeddings.weight = value + + # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer._prune_heads + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + raise NotImplementedError + + def init_visual_bbox(self, image_size: Tuple[int, int], max_len: int = 1000): + # We should not hardcode max_len to 1000, but it is done by the reference implementation, + # so we keep it for compatibility with the pretrained weights. The more correct approach + # would have been to pass on max_len=config.max_2d_position_embeddings - 1. + height, width = image_size + + visual_bbox_x = tf.range(0, max_len * (width + 1), max_len) // width + visual_bbox_x = tf.expand_dims(visual_bbox_x, axis=0) + visual_bbox_x = tf.tile(visual_bbox_x, [width, 1]) # (width, width + 1) + + visual_bbox_y = tf.range(0, max_len * (height + 1), max_len) // height + visual_bbox_y = tf.expand_dims(visual_bbox_y, axis=1) + visual_bbox_y = tf.tile(visual_bbox_y, [1, height]) # (height + 1, height) + + visual_bbox = tf.stack( + [visual_bbox_x[:, :-1], visual_bbox_y[:-1], visual_bbox_x[:, 1:], visual_bbox_y[1:]], + axis=-1, + ) + visual_bbox = tf.reshape(visual_bbox, [-1, 4]) + + cls_token_box = tf.constant([[1, 1, max_len - 1, max_len - 1]], dtype=tf.int32) + self.visual_bbox = tf.concat([cls_token_box, visual_bbox], axis=0) + + def calculate_visual_bbox(self, batch_size: int, dtype: tf.DType): + visual_bbox = tf.expand_dims(self.visual_bbox, axis=0) + visual_bbox = tf.tile(visual_bbox, [batch_size, 1, 1]) + visual_bbox = tf.cast(visual_bbox, dtype=dtype) + return visual_bbox + + def embed_image(self, pixel_values: tf.Tensor) -> tf.Tensor: + embeddings = self.patch_embed(pixel_values) + + # add [CLS] token + batch_size = tf.shape(embeddings)[0] + cls_tokens = tf.tile(self.cls_token, [batch_size, 1, 1]) + embeddings = tf.concat([cls_tokens, embeddings], axis=1) + + # add position embeddings + if getattr(self, "pos_embed", None) is not None: + embeddings += self.pos_embed + + embeddings = self.norm(embeddings) + return embeddings + + def get_extended_attention_mask(self, attention_mask: tf.Tensor) -> tf.Tensor: + # Adapted from transformers.modelling_utils.ModuleUtilsMixin.get_extended_attention_mask + + n_dims = len(attention_mask.shape) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. + if n_dims == 3: + extended_attention_mask = tf.expand_dims(attention_mask, axis=1) + elif n_dims == 2: + # Provided a padding mask of dimensions [batch_size, seq_length]. + # Make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]. + extended_attention_mask = tf.expand_dims(attention_mask, axis=1) # (batch_size, 1, seq_length) + extended_attention_mask = tf.expand_dims(extended_attention_mask, axis=1) # (batch_size, 1, 1, seq_length) + else: + raise ValueError(f"Wrong shape for attention_mask (shape {attention_mask.shape}).") + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + extended_attention_mask = tf.cast(extended_attention_mask, self.compute_dtype) + extended_attention_mask = (1.0 - extended_attention_mask) * LARGE_NEGATIVE + + return extended_attention_mask + + def get_head_mask(self, head_mask: Optional[tf.Tensor]) -> Union[tf.Tensor, List[Optional[tf.Tensor]]]: + if head_mask is None: + return [None] * self.config.num_hidden_layers + + n_dims = tf.rank(head_mask) + if n_dims == 1: + # Gets a tensor with masks for each head (H). + head_mask = tf.expand_dims(head_mask, axis=0) # 1, num_heads + head_mask = tf.expand_dims(head_mask, axis=0) # 1, 1, num_heads + head_mask = tf.expand_dims(head_mask, axis=-1) # 1, 1, num_heads, 1 + head_mask = tf.expand_dims(head_mask, axis=-1) # 1, 1, num_heads, 1, 1 + head_mask = tf.tile( + head_mask, [self.config.num_hidden_layers, 1, 1, 1, 1] + ) # seq_length, 1, num_heads, 1, 1 + elif n_dims == 2: + # Gets a tensor with masks for each layer (L) and head (H). + head_mask = tf.expand_dims(head_mask, axis=1) # seq_length, 1, num_heads + head_mask = tf.expand_dims(head_mask, axis=-1) # seq_length, 1, num_heads, 1 + head_mask = tf.expand_dims(head_mask, axis=-1) # seq_length, 1, num_heads, 1, 1 + elif n_dims != 5: + raise ValueError(f"Wrong shape for head_mask (shape {head_mask.shape}).") + assert tf.rank(head_mask) == 5, f"Got head_mask rank of {tf.rank(head_mask)}, but require 5." + head_mask = tf.cast(head_mask, self.compute_dtype) + return head_mask + + @unpack_inputs + def call( + self, + input_ids: Optional[tf.Tensor] = None, + bbox: Optional[tf.Tensor] = None, + attention_mask: Optional[tf.Tensor] = None, + token_type_ids: Optional[tf.Tensor] = None, + position_ids: Optional[tf.Tensor] = None, + head_mask: Optional[tf.Tensor] = None, + inputs_embeds: Optional[tf.Tensor] = None, + pixel_values: Optional[tf.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[ + TFBaseModelOutput, + Tuple[tf.Tensor], + Tuple[tf.Tensor, tf.Tensor], + Tuple[tf.Tensor, tf.Tensor, tf.Tensor], + ]: + # This method can be called with a variety of modalities: + # 1. text + layout + # 2. text + layout + image + # 3. image + # The complexity of this method is mostly just due to handling of these different modalities. + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.return_dict + + if input_ids is not None: + input_shape = tf.shape(input_ids) + batch_size = input_shape[0] + seq_length = input_shape[1] + elif inputs_embeds is not None: + input_shape = tf.shape(inputs_embeds) + batch_size = input_shape[0] + seq_length = input_shape[1] + elif pixel_values is not None: + batch_size = tf.shape(pixel_values)[0] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds or pixel_values") + + # Determine which integer dtype to use. + if input_ids is not None: + int_dtype = input_ids.dtype + elif bbox is not None: + int_dtype = bbox.dtype + elif attention_mask is not None: + int_dtype = attention_mask.dtype + elif token_type_ids is not None: + int_dtype = token_type_ids.dtype + else: + int_dtype = tf.int32 + + if input_ids is not None or inputs_embeds is not None: + if attention_mask is None: + attention_mask = tf.ones((batch_size, seq_length), dtype=int_dtype) + if token_type_ids is None: + token_type_ids = tf.zeros((batch_size, seq_length), dtype=int_dtype) + if bbox is None: + bbox = tf.zeros((batch_size, seq_length, 4), dtype=int_dtype) + + embedding_output = self.embeddings( + input_ids=input_ids, + bbox=bbox, + position_ids=position_ids, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + training=training, + ) + + final_bbox = None + final_position_ids = None + if pixel_values is not None: + # embed image + visual_embeddings = self.embed_image(pixel_values) + + # calculate attention mask + visual_attention_mask = tf.ones((batch_size, tf.shape(visual_embeddings)[1]), dtype=int_dtype) + if attention_mask is None: + attention_mask = visual_attention_mask + else: + attention_mask = tf.concat([attention_mask, visual_attention_mask], axis=1) + + # calculate bounding boxes + if self.config.has_spatial_attention_bias: + visual_bbox = self.calculate_visual_bbox(batch_size, int_dtype) + if bbox is None: + final_bbox = visual_bbox + else: + final_bbox = tf.concat([bbox, visual_bbox], axis=1) + + # calculate position IDs + if self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias: + visual_position_ids = tf.range(0, tf.shape(visual_embeddings)[1], dtype=int_dtype) + visual_position_ids = tf.expand_dims(visual_position_ids, axis=0) + visual_position_ids = tf.tile(visual_position_ids, [batch_size, 1]) + + if input_ids is not None or inputs_embeds is not None: + position_ids = tf.expand_dims(tf.range(0, seq_length, dtype=int_dtype), axis=0) + position_ids = tf.tile(position_ids, [batch_size, 1]) + final_position_ids = tf.concat([position_ids, visual_position_ids], axis=1) + else: + final_position_ids = visual_position_ids + + # calculate embeddings + if input_ids is None and inputs_embeds is None: + embedding_output = visual_embeddings + else: + embedding_output = tf.concat([embedding_output, visual_embeddings], axis=1) + embedding_output = self.LayerNorm(embedding_output) + embedding_output = self.dropout(embedding_output, training=training) + + elif self.config.has_relative_attention_bias or self.config.has_spatial_attention_bias: + if self.config.has_relative_attention_bias: + position_ids = tf.expand_dims(tf.range(0, seq_length, dtype=int_dtype), axis=0) + position_ids = tf.tile(position_ids, [batch_size, 1]) + final_position_ids = position_ids + + if self.config.has_spatial_attention_bias: + final_bbox = bbox + + extended_attention_mask = self.get_extended_attention_mask(attention_mask) + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape batch_size x num_heads x seq_length x seq_length + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask) + + encoder_outputs = self.encoder( + embedding_output, + bbox=final_bbox, + position_ids=final_position_ids, + attention_mask=extended_attention_mask, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = encoder_outputs[0] + + if not return_dict: + return (sequence_output,) + encoder_outputs[1:] + + return TFBaseModelOutput( + last_hidden_state=sequence_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + return TFBaseModelOutput( + last_hidden_state=sequence_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + +class TFLayoutLMv3PreTrainedModel(TFPreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = LayoutLMv3Config + base_model_prefix = "layoutlmv3" + + @property + def dummy_inputs(self) -> Dict[str, tf.Tensor]: + size = self.config.input_size + image_shape = (2, self.config.num_channels, size, size) + pixel_values = tf.random.uniform(shape=image_shape, minval=-1, maxval=1) + return { + "input_ids": tf.constant(_DUMMY_INPUT_IDS, dtype=tf.int32), + "bbox": tf.constant(_DUMMY_BBOX, dtype=tf.int32), + "pixel_values": pixel_values, + } + + @tf.function( + input_signature=[ + { + "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), + "bbox": tf.TensorSpec((None, None, 4), tf.int32, name="bbox"), + "pixel_values": tf.TensorSpec((None, None, None, None), tf.float32, name="pixel_values"), + "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), + } + ] + ) + def serving(self, inputs): + """ + Method used for serving the model. + + Args: + inputs (`Dict[str, tf.Tensor]`): + The input of the saved model as a dictionary of tensors. + """ + output = self.call(inputs) + + return self.serving_output(output) + + +LAYOUTLMV3_START_DOCSTRING = r""" + This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it + as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and + behavior. + + + + TF 2.0 models accepts two formats as inputs: + + - having all inputs as keyword arguments (like PyTorch models), or + - having all inputs as a list, tuple or dict in the first positional arguments. + + This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the + tensors in the first argument of the model call function: `model(inputs)`. + + + + Parameters: + config ([`LayoutLMv3Config`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. +""" + +LAYOUTLMV3_INPUTS_DOCSTRING = r""" + Args: + input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. + + Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] + token. See `pixel_values` for `patch_sequence_length`. + + Indices can be obtained using [`LayoutLMv3Tokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + + bbox (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length, 4)`, *optional*): + Bounding boxes of each input sequence tokens. Selected in the range `[0, + config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1) + format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1, + y1) represents the position of the lower right corner. + + Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] + token. See `pixel_values` for `patch_sequence_length`. + + pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): + Batch of document images. Each image is divided into patches of shape `(num_channels, config.patch_size, + config.patch_size)` and the total number of patches (=`patch_sequence_length`) equals to `((height / + config.patch_size) * (width / config.patch_size))`. + + attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] + token. See `pixel_values` for `patch_sequence_length`. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] + token. See `pixel_values` for `patch_sequence_length`. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + Note that `sequence_length = token_sequence_length + patch_sequence_length + 1` where `1` is for [CLS] + token. See `pixel_values` for `patch_sequence_length`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert *input_ids* indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare LayoutLMv3 Model transformer outputting raw hidden-states without any specific head on top.", + LAYOUTLMV3_START_DOCSTRING, +) +class TFLayoutLMv3Model(TFLayoutLMv3PreTrainedModel): + # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model + _keys_to_ignore_on_load_unexpected = [r"position_ids"] + + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + self.layoutlmv3 = TFLayoutLMv3MainLayer(config, name="layoutlmv3") + + @unpack_inputs + @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC) + def call( + self, + input_ids: Optional[tf.Tensor] = None, + bbox: Optional[tf.Tensor] = None, + attention_mask: Optional[tf.Tensor] = None, + token_type_ids: Optional[tf.Tensor] = None, + position_ids: Optional[tf.Tensor] = None, + head_mask: Optional[tf.Tensor] = None, + inputs_embeds: Optional[tf.Tensor] = None, + pixel_values: Optional[tf.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[ + TFBaseModelOutput, + Tuple[tf.Tensor], + Tuple[tf.Tensor, tf.Tensor], + Tuple[tf.Tensor, tf.Tensor, tf.Tensor], + ]: + r""" + Returns: + + Examples: + + ```python + >>> from transformers import AutoProcessor, TFAutoModel + >>> from datasets import load_dataset + + >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False) + >>> model = TFAutoModel.from_pretrained("microsoft/layoutlmv3-base") + + >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train") + >>> example = dataset[0] + >>> image = example["image"] + >>> words = example["tokens"] + >>> boxes = example["bboxes"] + + >>> encoding = processor(image, words, boxes=boxes, return_tensors="tf") + + >>> outputs = model(**encoding) + >>> last_hidden_states = outputs.last_hidden_state + ```""" + + outputs = self.layoutlmv3( + input_ids=input_ids, + bbox=bbox, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + return outputs + + def serving_output(self, output: TFBaseModelOutput) -> TFBaseModelOutput: + hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None + attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None + + return TFBaseModelOutput( + last_hidden_state=output.last_hidden_state, + hidden_states=hs, + attentions=attns, + ) + + +class TFLayoutLMv3ClassificationHead(tf.keras.layers.Layer): + """ + Head for sentence-level classification tasks. Reference: RobertaClassificationHead + """ + + def __init__(self, config: LayoutLMv3Config, **kwargs): + super().__init__(**kwargs) + self.dense = tf.keras.layers.Dense( + config.hidden_size, + activation="tanh", + kernel_initializer=get_initializer(config.initializer_range), + name="dense", + ) + classifier_dropout = ( + config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob + ) + self.dropout = tf.keras.layers.Dropout( + classifier_dropout, + name="dropout", + ) + self.out_proj = tf.keras.layers.Dense( + config.num_labels, + kernel_initializer=get_initializer(config.initializer_range), + name="out_proj", + ) + + def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor: + outputs = self.dropout(inputs, training=training) + outputs = self.dense(outputs) + outputs = self.dropout(outputs, training=training) + outputs = self.out_proj(outputs) + return outputs + + +@add_start_docstrings( + """ + LayoutLMv3 Model with a sequence classification head on top (a linear layer on top of the final hidden state of the + [CLS] token) e.g. for document image classification tasks such as the + [RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/) dataset. + """, + LAYOUTLMV3_START_DOCSTRING, +) +class TFLayoutLMv3ForSequenceClassification(TFLayoutLMv3PreTrainedModel, TFSequenceClassificationLoss): + # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model + _keys_to_ignore_on_load_unexpected = [r"position_ids"] + + def __init__(self, config: LayoutLMv3Config, **kwargs): + super().__init__(config, **kwargs) + self.config = config + self.layoutlmv3 = TFLayoutLMv3MainLayer(config, name="layoutlmv3") + self.classifier = TFLayoutLMv3ClassificationHead(config, name="classifier") + + @unpack_inputs + @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) + def call( + self, + input_ids: Optional[tf.Tensor] = None, + attention_mask: Optional[tf.Tensor] = None, + token_type_ids: Optional[tf.Tensor] = None, + position_ids: Optional[tf.Tensor] = None, + head_mask: Optional[tf.Tensor] = None, + inputs_embeds: Optional[tf.Tensor] = None, + labels: Optional[tf.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + bbox: Optional[tf.Tensor] = None, + pixel_values: Optional[tf.Tensor] = None, + training: Optional[bool] = False, + ) -> Union[ + TFSequenceClassifierOutput, + Tuple[tf.Tensor], + Tuple[tf.Tensor, tf.Tensor], + Tuple[tf.Tensor, tf.Tensor, tf.Tensor], + Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor], + ]: + """ + Returns: + + Examples: + + ```python + >>> from transformers import AutoProcessor, TFAutoModelForSequenceClassification + >>> from datasets import load_dataset + >>> import tensorflow as tf + + >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False) + >>> model = TFAutoModelForSequenceClassification.from_pretrained("microsoft/layoutlmv3-base") + + >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train") + >>> example = dataset[0] + >>> image = example["image"] + >>> words = example["tokens"] + >>> boxes = example["bboxes"] + + >>> encoding = processor(image, words, boxes=boxes, return_tensors="tf") + >>> sequence_label = tf.convert_to_tensor([1]) + + >>> outputs = model(**encoding, labels=sequence_label) + >>> loss = outputs.loss + >>> logits = outputs.logits + ```""" + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.layoutlmv3( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + bbox=bbox, + pixel_values=pixel_values, + training=training, + ) + sequence_output = outputs[0][:, 0, :] + logits = self.classifier(sequence_output, training=training) + + loss = None if labels is None else self.hf_compute_loss(labels, logits) + + if not return_dict: + output = (logits,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return TFSequenceClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + # Copied from transformers.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output + def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput: + hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None + attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None + + return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns) + + +@add_start_docstrings( + """ + LayoutLMv3 Model with a token classification head on top (a linear layer on top of the final hidden states) e.g. + for sequence labeling (information extraction) tasks such as [FUNSD](https://guillaumejaume.github.io/FUNSD/), + [SROIE](https://rrc.cvc.uab.es/?ch=13), [CORD](https://github.com/clovaai/cord) and + [Kleister-NDA](https://github.com/applicaai/kleister-nda). + """, + LAYOUTLMV3_START_DOCSTRING, +) +class TFLayoutLMv3ForTokenClassification(TFLayoutLMv3PreTrainedModel, TFTokenClassificationLoss): + # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model + _keys_to_ignore_on_load_unexpected = [r"position_ids"] + + def __init__(self, config: LayoutLMv3Config, **kwargs): + super().__init__(config, **kwargs) + self.num_labels = config.num_labels + + self.layoutlmv3 = TFLayoutLMv3MainLayer(config, name="layoutlmv3") + self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob, name="dropout") + if config.num_labels < 10: + self.classifier = tf.keras.layers.Dense( + config.num_labels, + kernel_initializer=get_initializer(config.initializer_range), + name="classifier", + ) + else: + self.classifier = TFLayoutLMv3ClassificationHead(config, name="classifier") + + @unpack_inputs + @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC) + def call( + self, + input_ids: Optional[tf.Tensor] = None, + bbox: Optional[tf.Tensor] = None, + attention_mask: Optional[tf.Tensor] = None, + token_type_ids: Optional[tf.Tensor] = None, + position_ids: Optional[tf.Tensor] = None, + head_mask: Optional[tf.Tensor] = None, + inputs_embeds: Optional[tf.Tensor] = None, + labels: Optional[tf.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + pixel_values: Optional[tf.Tensor] = None, + training: Optional[bool] = False, + ) -> Union[ + TFTokenClassifierOutput, + Tuple[tf.Tensor], + Tuple[tf.Tensor, tf.Tensor], + Tuple[tf.Tensor, tf.Tensor, tf.Tensor], + Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor], + ]: + r""" + labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. + + Returns: + + Examples: + + ```python + >>> from transformers import AutoProcessor, TFAutoModelForTokenClassification + >>> from datasets import load_dataset + + >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False) + >>> model = TFAutoModelForTokenClassification.from_pretrained("microsoft/layoutlmv3-base", num_labels=7) + + >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train") + >>> example = dataset[0] + >>> image = example["image"] + >>> words = example["tokens"] + >>> boxes = example["bboxes"] + >>> word_labels = example["ner_tags"] + + >>> encoding = processor(image, words, boxes=boxes, word_labels=word_labels, return_tensors="tf") + + >>> outputs = model(**encoding) + >>> loss = outputs.loss + >>> logits = outputs.logits + ```""" + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.layoutlmv3( + input_ids, + bbox=bbox, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + pixel_values=pixel_values, + training=training, + ) + if input_ids is not None: + input_shape = tf.shape(input_ids) + else: + input_shape = tf.shape(inputs_embeds)[:-1] + + seq_length = input_shape[1] + # only take the text part of the output representations + sequence_output = outputs[0][:, :seq_length] + sequence_output = self.dropout(sequence_output, training=training) + logits = self.classifier(sequence_output) + + loss = None if labels is None else self.hf_compute_loss(labels, logits) + + if not return_dict: + output = (logits,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return TFTokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + # Copied from transformers.models.bert.modeling_tf_bert.TFBertForTokenClassification.serving_output + def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput: + hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None + attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None + + return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns) + + +@add_start_docstrings( + """ + LayoutLMv3 Model with a span classification head on top for extractive question-answering tasks such as + [DocVQA](https://rrc.cvc.uab.es/?ch=17) (a linear layer on top of the text part of the hidden-states output to + compute `span start logits` and `span end logits`). + """, + LAYOUTLMV3_START_DOCSTRING, +) +class TFLayoutLMv3ForQuestionAnswering(TFLayoutLMv3PreTrainedModel, TFQuestionAnsweringLoss): + # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model + _keys_to_ignore_on_load_unexpected = [r"position_ids"] + + def __init__(self, config: LayoutLMv3Config, **kwargs): + super().__init__(config, **kwargs) + + self.num_labels = config.num_labels + + self.layoutlmv3 = TFLayoutLMv3MainLayer(config, name="layoutlmv3") + self.qa_outputs = TFLayoutLMv3ClassificationHead(config, name="qa_outputs") + + @unpack_inputs + @add_start_docstrings_to_model_forward(LAYOUTLMV3_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) + def call( + self, + input_ids: Optional[tf.Tensor] = None, + attention_mask: Optional[tf.Tensor] = None, + token_type_ids: Optional[tf.Tensor] = None, + position_ids: Optional[tf.Tensor] = None, + head_mask: Optional[tf.Tensor] = None, + inputs_embeds: Optional[tf.Tensor] = None, + start_positions: Optional[tf.Tensor] = None, + end_positions: Optional[tf.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + bbox: Optional[tf.Tensor] = None, + pixel_values: Optional[tf.Tensor] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[ + TFQuestionAnsweringModelOutput, + Tuple[tf.Tensor], + Tuple[tf.Tensor, tf.Tensor], + Tuple[tf.Tensor, tf.Tensor, tf.Tensor], + Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor], + ]: + r""" + start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + + Returns: + + Examples: + + ```python + >>> from transformers import AutoProcessor, TFAutoModelForQuestionAnswering + >>> from datasets import load_dataset + >>> import tensorflow as tf + + >>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False) + >>> model = TFAutoModelForQuestionAnswering.from_pretrained("microsoft/layoutlmv3-base") + + >>> dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train") + >>> example = dataset[0] + >>> image = example["image"] + >>> question = "what's his name?" + >>> words = example["tokens"] + >>> boxes = example["bboxes"] + + >>> encoding = processor(image, question, words, boxes=boxes, return_tensors="tf") + >>> start_positions = tf.convert_to_tensor([1]) + >>> end_positions = tf.convert_to_tensor([3]) + + >>> outputs = model(**encoding, start_positions=start_positions, end_positions=end_positions) + >>> loss = outputs.loss + >>> start_scores = outputs.start_logits + >>> end_scores = outputs.end_logits + ```""" + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.layoutlmv3( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + bbox=bbox, + pixel_values=pixel_values, + training=training, + ) + + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output, training=training) + start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1) + start_logits = tf.squeeze(input=start_logits, axis=-1) + end_logits = tf.squeeze(input=end_logits, axis=-1) + + loss = None + + if start_positions is not None and end_positions is not None: + labels = {"start_position": start_positions, "end_position": end_positions} + loss = self.hf_compute_loss(labels, logits=(start_logits, end_logits)) + + if not return_dict: + output = (start_logits, end_logits) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return TFQuestionAnsweringModelOutput( + loss=loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + # Copied from transformers.models.bert.modeling_tf_bert.TFBertForQuestionAnswering.serving_output + def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput: + hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None + attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None + + return TFQuestionAnsweringModelOutput( + start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns + ) diff --git a/src/transformers/utils/dummy_tf_objects.py b/src/transformers/utils/dummy_tf_objects.py index 5f8124ae558465..e77a414cdce437 100644 --- a/src/transformers/utils/dummy_tf_objects.py +++ b/src/transformers/utils/dummy_tf_objects.py @@ -1316,6 +1316,44 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) +TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFLayoutLMv3ForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLayoutLMv3ForSequenceClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLayoutLMv3ForTokenClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLayoutLMv3Model(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFLayoutLMv3PreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + class TFLEDForConditionalGeneration(metaclass=DummyObject): _backends = ["tf"] diff --git a/tests/models/layoutlmv3/test_modeling_tf_layoutlmv3.py b/tests/models/layoutlmv3/test_modeling_tf_layoutlmv3.py new file mode 100644 index 00000000000000..f71aeb0aefb4df --- /dev/null +++ b/tests/models/layoutlmv3/test_modeling_tf_layoutlmv3.py @@ -0,0 +1,497 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Testing suite for the TensorFlow LayoutLMv3 model. """ + +import copy +import inspect +import unittest + +import numpy as np + +from transformers import is_tf_available, is_vision_available +from transformers.models.auto import get_values +from transformers.testing_utils import require_tf, slow +from transformers.utils import cached_property + +from ...test_configuration_common import ConfigTester +from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask + + +if is_tf_available(): + import tensorflow as tf + + from transformers import ( + TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, + TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, + TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, + TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, + TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, + LayoutLMv3Config, + TFLayoutLMv3ForQuestionAnswering, + TFLayoutLMv3ForSequenceClassification, + TFLayoutLMv3ForTokenClassification, + TFLayoutLMv3Model, + ) + +if is_vision_available(): + from PIL import Image + + from transformers import LayoutLMv3FeatureExtractor + + +class TFLayoutLMv3ModelTester: + def __init__( + self, + parent, + batch_size=2, + num_channels=3, + image_size=4, + patch_size=2, + text_seq_length=7, + is_training=True, + use_input_mask=True, + use_token_type_ids=True, + use_labels=True, + vocab_size=99, + hidden_size=36, + num_hidden_layers=3, + num_attention_heads=4, + intermediate_size=37, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=16, + type_sequence_label_size=2, + initializer_range=0.02, + coordinate_size=6, + shape_size=6, + num_labels=3, + num_choices=4, + scope=None, + range_bbox=1000, + ): + self.parent = parent + self.batch_size = batch_size + self.num_channels = num_channels + self.image_size = image_size + self.patch_size = patch_size + self.is_training = is_training + self.use_input_mask = use_input_mask + self.use_token_type_ids = use_token_type_ids + self.use_labels = use_labels + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.type_sequence_label_size = type_sequence_label_size + self.initializer_range = initializer_range + self.coordinate_size = coordinate_size + self.shape_size = shape_size + self.num_labels = num_labels + self.num_choices = num_choices + self.scope = scope + self.range_bbox = range_bbox + + # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) + self.text_seq_length = text_seq_length + self.image_seq_length = (image_size // patch_size) ** 2 + 1 + self.seq_length = self.text_seq_length + self.image_seq_length + + def prepare_config_and_inputs(self): + input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size) + + bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox) + bbox = bbox.numpy() + # Ensure that bbox is legal + for i in range(bbox.shape[0]): + for j in range(bbox.shape[1]): + if bbox[i, j, 3] < bbox[i, j, 1]: + tmp_coordinate = bbox[i, j, 3] + bbox[i, j, 3] = bbox[i, j, 1] + bbox[i, j, 1] = tmp_coordinate + if bbox[i, j, 2] < bbox[i, j, 0]: + tmp_coordinate = bbox[i, j, 2] + bbox[i, j, 2] = bbox[i, j, 0] + bbox[i, j, 0] = tmp_coordinate + bbox = tf.constant(bbox) + + pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) + + input_mask = None + if self.use_input_mask: + input_mask = random_attention_mask([self.batch_size, self.text_seq_length]) + + token_type_ids = None + if self.use_token_type_ids: + token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size) + + sequence_labels = None + token_labels = None + if self.use_labels: + sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) + token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels) + + config = LayoutLMv3Config( + vocab_size=self.vocab_size, + hidden_size=self.hidden_size, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + intermediate_size=self.intermediate_size, + hidden_act=self.hidden_act, + hidden_dropout_prob=self.hidden_dropout_prob, + attention_probs_dropout_prob=self.attention_probs_dropout_prob, + max_position_embeddings=self.max_position_embeddings, + type_vocab_size=self.type_vocab_size, + initializer_range=self.initializer_range, + coordinate_size=self.coordinate_size, + shape_size=self.shape_size, + input_size=self.image_size, + patch_size=self.patch_size, + ) + + return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels + + def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask): + model = TFLayoutLMv3Model(config=config) + + # text + image + result = model(input_ids, pixel_values=pixel_values, training=False) + result = model( + input_ids, + bbox=bbox, + pixel_values=pixel_values, + attention_mask=input_mask, + token_type_ids=token_type_ids, + training=False, + ) + result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False) + + self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) + + # text only + result = model(input_ids, training=False) + self.parent.assertEqual( + result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size) + ) + + # image only + result = model({"pixel_values": pixel_values}, training=False) + self.parent.assertEqual( + result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size) + ) + + def create_and_check_for_sequence_classification( + self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels + ): + config.num_labels = self.num_labels + model = TFLayoutLMv3ForSequenceClassification(config=config) + result = model( + input_ids, + bbox=bbox, + pixel_values=pixel_values, + attention_mask=input_mask, + token_type_ids=token_type_ids, + labels=sequence_labels, + training=False, + ) + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) + + def create_and_check_for_token_classification( + self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels + ): + config.num_labels = self.num_labels + model = TFLayoutLMv3ForTokenClassification(config=config) + result = model( + input_ids, + bbox=bbox, + pixel_values=pixel_values, + attention_mask=input_mask, + token_type_ids=token_type_ids, + labels=token_labels, + training=False, + ) + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels)) + + def create_and_check_for_question_answering( + self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels + ): + config.num_labels = 2 + model = TFLayoutLMv3ForQuestionAnswering(config=config) + result = model( + input_ids, + bbox=bbox, + pixel_values=pixel_values, + attention_mask=input_mask, + token_type_ids=token_type_ids, + start_positions=sequence_labels, + end_positions=sequence_labels, + training=False, + ) + self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) + self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs + inputs_dict = { + "input_ids": input_ids, + "bbox": bbox, + "pixel_values": pixel_values, + "token_type_ids": token_type_ids, + "attention_mask": input_mask, + } + return config, inputs_dict + + +@require_tf +class TFLayoutLMv3ModelTest(TFModelTesterMixin, unittest.TestCase): + + all_model_classes = ( + ( + TFLayoutLMv3Model, + TFLayoutLMv3ForQuestionAnswering, + TFLayoutLMv3ForSequenceClassification, + TFLayoutLMv3ForTokenClassification, + ) + if is_tf_available() + else () + ) + + test_pruning = False + test_resize_embeddings = False + test_onnx = False + + def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict: + inputs_dict = copy.deepcopy(inputs_dict) + + if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): + inputs_dict = { + k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1)) + if isinstance(v, tf.Tensor) and v.ndim > 0 + else v + for k, v in inputs_dict.items() + } + + if return_labels: + if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): + inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32) + elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING): + inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) + inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) + elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING): + inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) + elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING): + inputs_dict["labels"] = tf.zeros( + (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32 + ) + + return inputs_dict + + def setUp(self): + self.model_tester = TFLayoutLMv3ModelTester(self) + self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37) + + def test_config(self): + self.config_tester.run_common_tests() + + def test_loss_computation(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + for model_class in self.all_model_classes: + model = model_class(config) + if getattr(model, "hf_compute_loss", None): + # The number of elements in the loss should be the same as the number of elements in the label + prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) + added_label = prepared_for_class[ + sorted(list(prepared_for_class.keys() - inputs_dict.keys()), reverse=True)[0] + ] + expected_loss_size = added_label.shape.as_list()[:1] + + # Test that model correctly compute the loss with kwargs + prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) + input_ids = prepared_for_class.pop("input_ids") + + loss = model(input_ids, **prepared_for_class)[0] + self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) + + # Test that model correctly compute the loss when we mask some positions + prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) + input_ids = prepared_for_class.pop("input_ids") + if "labels" in prepared_for_class: + labels = prepared_for_class["labels"].numpy() + if len(labels.shape) > 1 and labels.shape[1] != 1: + labels[0] = -100 + prepared_for_class["labels"] = tf.convert_to_tensor(labels) + loss = model(input_ids, **prepared_for_class)[0] + self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) + self.assertTrue(not np.any(np.isnan(loss.numpy()))) + + # Test that model correctly compute the loss with a dict + prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) + loss = model(prepared_for_class)[0] + self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) + + # Test that model correctly compute the loss with a tuple + prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) + + # Get keys that were added with the _prepare_for_class function + label_keys = prepared_for_class.keys() - inputs_dict.keys() + signature = inspect.signature(model.call).parameters + signature_names = list(signature.keys()) + + # Create a dictionary holding the location of the tensors in the tuple + tuple_index_mapping = {0: "input_ids"} + for label_key in label_keys: + label_key_index = signature_names.index(label_key) + tuple_index_mapping[label_key_index] = label_key + sorted_tuple_index_mapping = sorted(tuple_index_mapping.items()) + # Initialize a list with their default values, update the values and convert to a tuple + list_input = [] + + for name in signature_names: + if name != "kwargs": + list_input.append(signature[name].default) + + for index, value in sorted_tuple_index_mapping: + list_input[index] = prepared_for_class[value] + + tuple_input = tuple(list_input) + + # Send to model + loss = model(tuple_input[:-1])[0] + + self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) + + def test_model(self): + ( + config, + input_ids, + bbox, + pixel_values, + token_type_ids, + input_mask, + _, + _, + ) = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask) + + def test_model_various_embeddings(self): + ( + config, + input_ids, + bbox, + pixel_values, + token_type_ids, + input_mask, + _, + _, + ) = self.model_tester.prepare_config_and_inputs() + for type in ["absolute", "relative_key", "relative_key_query"]: + config.position_embedding_type = type + self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask) + + def test_for_sequence_classification(self): + ( + config, + input_ids, + bbox, + pixel_values, + token_type_ids, + input_mask, + sequence_labels, + _, + ) = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_for_sequence_classification( + config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels + ) + + def test_for_token_classification(self): + ( + config, + input_ids, + bbox, + pixel_values, + token_type_ids, + input_mask, + _, + token_labels, + ) = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_for_token_classification( + config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels + ) + + def test_for_question_answering(self): + ( + config, + input_ids, + bbox, + pixel_values, + token_type_ids, + input_mask, + sequence_labels, + _, + ) = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_for_question_answering( + config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels + ) + + @slow + def test_model_from_pretrained(self): + for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: + model = TFLayoutLMv3Model.from_pretrained(model_name) + self.assertIsNotNone(model) + + +# We will verify our results on an image of cute cats +def prepare_img(): + image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") + return image + + +@require_tf +class TFLayoutLMv3ModelIntegrationTest(unittest.TestCase): + @cached_property + def default_feature_extractor(self): + return LayoutLMv3FeatureExtractor(apply_ocr=False) if is_vision_available() else None + + @slow + def test_inference_no_head(self): + model = TFLayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base") + + feature_extractor = self.default_feature_extractor + image = prepare_img() + pixel_values = feature_extractor(images=image, return_tensors="tf").pixel_values + + input_ids = tf.constant([[1, 2]]) + bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0) + + # forward pass + outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False) + + # verify the logits + expected_shape = (1, 199, 768) + self.assertEqual(outputs.last_hidden_state.shape, expected_shape) + + expected_slice = tf.constant( + [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] + ) + + self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)) diff --git a/utils/documentation_tests.txt b/utils/documentation_tests.txt index 0edda8ae5a4c3c..b03dcf51173108 100644 --- a/utils/documentation_tests.txt +++ b/utils/documentation_tests.txt @@ -38,6 +38,7 @@ src/transformers/models/gptj/modeling_gptj.py src/transformers/models/hubert/modeling_hubert.py src/transformers/models/layoutlmv2/modeling_layoutlmv2.py src/transformers/models/layoutlmv3/modeling_layoutlmv3.py +src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py src/transformers/models/longformer/modeling_longformer.py src/transformers/models/longformer/modeling_tf_longformer.py src/transformers/models/longt5/modeling_longt5.py From ef91a2d135f3fa43c89511f7c11ae3543e260692 Mon Sep 17 00:00:00 2001 From: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Date: Tue, 30 Aug 2022 13:03:28 +0100 Subject: [PATCH 161/539] Run tests if skip condition not met (#18764) * Run tests if skip condition not met * Update comment - remove outdated ref to TF 2.8 --- tests/models/convnext/test_modeling_tf_convnext.py | 6 +++--- tests/models/regnet/test_modeling_tf_regnet.py | 4 ++-- tests/models/segformer/test_modeling_tf_segformer.py | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/models/convnext/test_modeling_tf_convnext.py b/tests/models/convnext/test_modeling_tf_convnext.py index bc84cd0a40007e..9a0d3140e96e6c 100644 --- a/tests/models/convnext/test_modeling_tf_convnext.py +++ b/tests/models/convnext/test_modeling_tf_convnext.py @@ -145,10 +145,10 @@ def test_inputs_embeds(self): @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, - reason="TF (<=2.8) does not support backprop for grouped convolutions on CPU.", + reason="TF does not support backprop for grouped convolutions on CPU.", ) def test_keras_fit(self): - pass + super().test_keras_fit() @unittest.skip(reason="ConvNext does not support input and output embeddings") def test_model_common_attributes(self): @@ -176,7 +176,7 @@ def test_attention_outputs(self): @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, - reason="TF (<=2.8) does not support backprop for grouped convolutions on CPU.", + reason="TF does not support backprop for grouped convolutions on CPU.", ) def test_dataset_conversion(self): super().test_dataset_conversion() diff --git a/tests/models/regnet/test_modeling_tf_regnet.py b/tests/models/regnet/test_modeling_tf_regnet.py index c7504c92fa35b0..3b426fcd0817fc 100644 --- a/tests/models/regnet/test_modeling_tf_regnet.py +++ b/tests/models/regnet/test_modeling_tf_regnet.py @@ -138,10 +138,10 @@ def test_inputs_embeds(self): @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, - reason="TF (<=2.8) does not support backprop for grouped convolutions on CPU.", + reason="TF does not support backprop for grouped convolutions on CPU.", ) def test_keras_fit(self): - pass + super().test_keras_fit() @unittest.skip(reason="RegNet does not support input and output embeddings") def test_model_common_attributes(self): diff --git a/tests/models/segformer/test_modeling_tf_segformer.py b/tests/models/segformer/test_modeling_tf_segformer.py index d6a73e22192c3b..dfdb24f37b17ee 100644 --- a/tests/models/segformer/test_modeling_tf_segformer.py +++ b/tests/models/segformer/test_modeling_tf_segformer.py @@ -332,7 +332,7 @@ def recursive_check(tuple_object, dict_object): @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, - reason="TF (<=2.8) does not support backprop for grouped convolutions on CPU.", + reason="TF does not support backprop for grouped convolutions on CPU.", ) def test_dataset_conversion(self): super().test_dataset_conversion() @@ -342,7 +342,7 @@ def check_keras_fit_results(self, val_loss1, val_loss2, atol=2e-1, rtol=2e-1): @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, - reason="TF (<=2.8) does not support backprop for grouped convolutions on CPU.", + reason="TF does not support backprop for grouped convolutions on CPU.", ) def test_keras_fit(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() From b83796ded7725d0cb64ea7258ef95e395151a211 Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Tue, 30 Aug 2022 14:15:36 +0200 Subject: [PATCH 162/539] Remove ViltForQuestionAnswering from check_repo (#18762) Co-authored-by: Niels Rogge --- utils/check_repo.py | 1 - 1 file changed, 1 deletion(-) diff --git a/utils/check_repo.py b/utils/check_repo.py index 254467113d6cb4..1c5f3709530d94 100644 --- a/utils/check_repo.py +++ b/utils/check_repo.py @@ -128,7 +128,6 @@ "DPTForDepthEstimation", "DecisionTransformerGPT2Model", "GLPNForDepthEstimation", - "ViltForQuestionAnswering", "ViltForImagesAndTextClassification", "ViltForImageAndTextRetrieval", "ViltForTokenClassification", From 46d0e26a276f18157223ee0474560bcc78d74920 Mon Sep 17 00:00:00 2001 From: Dhruv Karan Date: Tue, 30 Aug 2022 18:00:59 +0530 Subject: [PATCH 163/539] Adds OWLViT to models exportable with ONNX (#18588) * onnx conversion for owlvit * .T to .t() * dynamic shapes for pixel values --- docs/source/en/serialization.mdx | 1 + src/transformers/models/owlvit/__init__.py | 2 + .../models/owlvit/configuration_owlvit.py | 50 ++++++++++++++++++- .../models/owlvit/modeling_owlvit.py | 7 ++- src/transformers/onnx/features.py | 4 ++ tests/onnx/test_onnx_v2.py | 1 + 6 files changed, 62 insertions(+), 3 deletions(-) diff --git a/docs/source/en/serialization.mdx b/docs/source/en/serialization.mdx index 89b73df4f5dfa6..11336c61a49f1d 100644 --- a/docs/source/en/serialization.mdx +++ b/docs/source/en/serialization.mdx @@ -83,6 +83,7 @@ Ready-made configurations include the following architectures: - MobileViT - MT5 - OpenAI GPT-2 +- OWL-ViT - Perceiver - PLBart - ResNet diff --git a/src/transformers/models/owlvit/__init__.py b/src/transformers/models/owlvit/__init__.py index 8315df69faace0..cc528d315eb3fd 100644 --- a/src/transformers/models/owlvit/__init__.py +++ b/src/transformers/models/owlvit/__init__.py @@ -32,6 +32,7 @@ "configuration_owlvit": [ "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OwlViTConfig", + "OwlViTOnnxConfig", "OwlViTTextConfig", "OwlViTVisionConfig", ], @@ -66,6 +67,7 @@ from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, + OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) diff --git a/src/transformers/models/owlvit/configuration_owlvit.py b/src/transformers/models/owlvit/configuration_owlvit.py index 85ffdbadbeff39..ff0bd6e6120d05 100644 --- a/src/transformers/models/owlvit/configuration_owlvit.py +++ b/src/transformers/models/owlvit/configuration_owlvit.py @@ -16,9 +16,16 @@ import copy import os -from typing import Dict, Union +from collections import OrderedDict +from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union + + +if TYPE_CHECKING: + from ...processing_utils import ProcessorMixin + from ...utils import TensorType from ...configuration_utils import PretrainedConfig +from ...onnx import OnnxConfig from ...utils import logging @@ -334,3 +341,44 @@ def to_dict(self): output["vision_config"] = self.vision_config.to_dict() output["model_type"] = self.__class__.model_type return output + + +class OwlViTOnnxConfig(OnnxConfig): + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + return OrderedDict( + [ + ("input_ids", {0: "batch", 1: "sequence"}), + ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), + ("attention_mask", {0: "batch", 1: "sequence"}), + ] + ) + + @property + def outputs(self) -> Mapping[str, Mapping[int, str]]: + return OrderedDict( + [ + ("logits_per_image", {0: "batch"}), + ("logits_per_text", {0: "batch"}), + ("text_embeds", {0: "batch"}), + ("image_embeds", {0: "batch"}), + ] + ) + + @property + def atol_for_validation(self) -> float: + return 1e-4 + + def generate_dummy_inputs( + self, + processor: "ProcessorMixin", + framework: Optional["TensorType"] = None, + ) -> Mapping[str, Any]: + + text_input_dict = super().generate_dummy_inputs(processor.tokenizer, framework=framework) + image_input_dict = super().generate_dummy_inputs(processor.feature_extractor, framework=framework) + return {**text_input_dict, **image_input_dict} + + @property + def default_onnx_opset(self) -> int: + return 14 diff --git a/src/transformers/models/owlvit/modeling_owlvit.py b/src/transformers/models/owlvit/modeling_owlvit.py index c0386ab23d3fba..5ff22c901a351c 100644 --- a/src/transformers/models/owlvit/modeling_owlvit.py +++ b/src/transformers/models/owlvit/modeling_owlvit.py @@ -687,7 +687,10 @@ def forward( last_hidden_state = self.final_layer_norm(last_hidden_state) # take features from the end of tokens embedding (end of token is the highest number in each sequence) - pooled_output = last_hidden_state[torch.arange(last_hidden_state.shape[0]), input_ids.argmax(dim=-1)] + # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14 + pooled_output = last_hidden_state[ + torch.arange(last_hidden_state.shape[0]), input_ids.to(torch.int).argmax(dim=-1) + ] if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] @@ -1066,7 +1069,7 @@ def forward( # cosine similarity as logits logit_scale = self.logit_scale.exp() logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale - logits_per_image = logits_per_text.T + logits_per_image = logits_per_text.t() loss = None if return_loss: diff --git a/src/transformers/onnx/features.py b/src/transformers/onnx/features.py index eb57df1c960364..879ba1c262fac5 100644 --- a/src/transformers/onnx/features.py +++ b/src/transformers/onnx/features.py @@ -416,6 +416,10 @@ class FeaturesManager: "seq2seq-lm-with-past", onnx_config_cls="models.m2m_100.M2M100OnnxConfig", ), + "owlvit": supported_features_mapping( + "default", + onnx_config_cls="models.owlvit.OwlViTOnnxConfig", + ), "perceiver": supported_features_mapping( "image-classification", "masked-lm", diff --git a/tests/onnx/test_onnx_v2.py b/tests/onnx/test_onnx_v2.py index 52ced984ca8007..3872f1dfa06ac1 100644 --- a/tests/onnx/test_onnx_v2.py +++ b/tests/onnx/test_onnx_v2.py @@ -205,6 +205,7 @@ def test_values_override(self): ("layoutlm", "microsoft/layoutlm-base-uncased"), ("layoutlmv3", "microsoft/layoutlmv3-base"), ("levit", "facebook/levit-128S"), + ("owlvit", "google/owlvit-base-patch32"), ("vit", "google/vit-base-patch16-224"), ("deit", "facebook/deit-small-patch16-224"), ("beit", "microsoft/beit-base-patch16-224"), From 220da3b8a1cde5870696369a02227f9211d626be Mon Sep 17 00:00:00 2001 From: Dhruv Karan Date: Tue, 30 Aug 2022 18:01:35 +0530 Subject: [PATCH 164/539] Adds GroupViT to models exportable with ONNX (#18628) * groupvit to onnx * dynamic shape for pixel values dim --- docs/source/en/serialization.mdx | 1 + src/transformers/models/groupvit/__init__.py | 2 + .../models/groupvit/configuration_groupvit.py | 50 ++++++++++++++++++- .../models/groupvit/modeling_groupvit.py | 2 +- src/transformers/onnx/features.py | 4 ++ tests/onnx/test_onnx_v2.py | 1 + 6 files changed, 58 insertions(+), 2 deletions(-) diff --git a/docs/source/en/serialization.mdx b/docs/source/en/serialization.mdx index 11336c61a49f1d..d6bf15df7ffcc4 100644 --- a/docs/source/en/serialization.mdx +++ b/docs/source/en/serialization.mdx @@ -70,6 +70,7 @@ Ready-made configurations include the following architectures: - FlauBERT - GPT Neo - GPT-J +- GroupViT - I-BERT - LayoutLM - LayoutLMv3 diff --git a/src/transformers/models/groupvit/__init__.py b/src/transformers/models/groupvit/__init__.py index 8d902054975bc4..3985e9ecff5d73 100644 --- a/src/transformers/models/groupvit/__init__.py +++ b/src/transformers/models/groupvit/__init__.py @@ -24,6 +24,7 @@ "configuration_groupvit": [ "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GroupViTConfig", + "GroupViTOnnxConfig", "GroupViTTextConfig", "GroupViTVisionConfig", ], @@ -47,6 +48,7 @@ from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, + GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) diff --git a/src/transformers/models/groupvit/configuration_groupvit.py b/src/transformers/models/groupvit/configuration_groupvit.py index 8940cf40b9f1ba..895c0608b730f9 100644 --- a/src/transformers/models/groupvit/configuration_groupvit.py +++ b/src/transformers/models/groupvit/configuration_groupvit.py @@ -16,12 +16,19 @@ import copy import os -from typing import Union +from collections import OrderedDict +from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig +from ...onnx import OnnxConfig from ...utils import logging +if TYPE_CHECKING: + from ...processing_utils import ProcessorMixin + from ...utils import TensorType + + logger = logging.get_logger(__name__) GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = { @@ -343,3 +350,44 @@ def to_dict(self): output["vision_config"] = self.vision_config.to_dict() output["model_type"] = self.__class__.model_type return output + + +class GroupViTOnnxConfig(OnnxConfig): + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + return OrderedDict( + [ + ("input_ids", {0: "batch", 1: "sequence"}), + ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), + ("attention_mask", {0: "batch", 1: "sequence"}), + ] + ) + + @property + def outputs(self) -> Mapping[str, Mapping[int, str]]: + return OrderedDict( + [ + ("logits_per_image", {0: "batch"}), + ("logits_per_text", {0: "batch"}), + ("text_embeds", {0: "batch"}), + ("image_embeds", {0: "batch"}), + ] + ) + + @property + def atol_for_validation(self) -> float: + return 1e-4 + + def generate_dummy_inputs( + self, + processor: "ProcessorMixin", + framework: Optional["TensorType"] = None, + ) -> Mapping[str, Any]: + + text_input_dict = super().generate_dummy_inputs(processor.tokenizer, framework=framework) + image_input_dict = super().generate_dummy_inputs(processor.feature_extractor, framework=framework) + return {**text_input_dict, **image_input_dict} + + @property + def default_onnx_opset(self) -> int: + return 14 diff --git a/src/transformers/models/groupvit/modeling_groupvit.py b/src/transformers/models/groupvit/modeling_groupvit.py index 9817065ab37a55..3d2f78e3cfd3b2 100644 --- a/src/transformers/models/groupvit/modeling_groupvit.py +++ b/src/transformers/models/groupvit/modeling_groupvit.py @@ -1542,7 +1542,7 @@ def forward( # cosine similarity as logits logit_scale = self.logit_scale.exp() logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale - logits_per_image = logits_per_text.T + logits_per_image = logits_per_text.t() seg_logits = None if output_segmentation: diff --git a/src/transformers/onnx/features.py b/src/transformers/onnx/features.py index 879ba1c262fac5..b1ea30c1afc7ae 100644 --- a/src/transformers/onnx/features.py +++ b/src/transformers/onnx/features.py @@ -326,6 +326,10 @@ class FeaturesManager: "sequence-classification", onnx_config_cls="models.gpt_neo.GPTNeoOnnxConfig", ), + "groupvit": supported_features_mapping( + "default", + onnx_config_cls="models.groupvit.GroupViTOnnxConfig", + ), "ibert": supported_features_mapping( "default", "masked-lm", diff --git a/tests/onnx/test_onnx_v2.py b/tests/onnx/test_onnx_v2.py index 3872f1dfa06ac1..16ee78a63c4874 100644 --- a/tests/onnx/test_onnx_v2.py +++ b/tests/onnx/test_onnx_v2.py @@ -204,6 +204,7 @@ def test_values_override(self): ("xlm-roberta", "xlm-roberta-base"), ("layoutlm", "microsoft/layoutlm-base-uncased"), ("layoutlmv3", "microsoft/layoutlmv3-base"), + ("groupvit", "nvidia/groupvit-gcc-yfcc"), ("levit", "facebook/levit-128S"), ("owlvit", "google/owlvit-base-patch32"), ("vit", "google/vit-base-patch16-224"), From a98f6a1da012ca7847e4dceb3ffcedfd75a77b08 Mon Sep 17 00:00:00 2001 From: anthony2261 Date: Tue, 30 Aug 2022 15:43:14 +0300 Subject: [PATCH 165/539] LayoutXLMProcessor: ensure 1-to-1 mapping between samples and images, and add test for it (#18774) --- .../models/layoutxlm/processing_layoutxlm.py | 3 ++ .../layoutxlm/test_processor_layoutxlm.py | 34 +++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/src/transformers/models/layoutxlm/processing_layoutxlm.py b/src/transformers/models/layoutxlm/processing_layoutxlm.py index 03423d17c27be2..da75398493d7ec 100644 --- a/src/transformers/models/layoutxlm/processing_layoutxlm.py +++ b/src/transformers/models/layoutxlm/processing_layoutxlm.py @@ -89,6 +89,9 @@ def __call__( "You cannot provide word labels if you initialized the feature extractor with apply_ocr set to True." ) + if return_overflowing_tokens is True and return_offsets_mapping is False: + raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.") + # first, apply the feature extractor features = self.feature_extractor(images=images, return_tensors=return_tensors) diff --git a/tests/models/layoutxlm/test_processor_layoutxlm.py b/tests/models/layoutxlm/test_processor_layoutxlm.py index d0d7eec28a34a9..2752bd16a82bbf 100644 --- a/tests/models/layoutxlm/test_processor_layoutxlm.py +++ b/tests/models/layoutxlm/test_processor_layoutxlm.py @@ -126,6 +126,40 @@ def test_save_load_pretrained_additional_features(self): self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) self.assertIsInstance(processor.feature_extractor, LayoutLMv2FeatureExtractor) + @slow + def test_overflowing_tokens(self): + # In the case of overflowing tokens, test that we still have 1-to-1 mapping between the images and input_ids (sequences that are too long are broken down into multiple sequences). + + from datasets import load_dataset + + # set up + datasets = load_dataset("nielsr/funsd") + processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base", apply_ocr=False) + + def preprocess_data(examples): + images = [Image.open(path).convert("RGB") for path in examples["image_path"]] + words = examples["words"] + boxes = examples["bboxes"] + word_labels = examples["ner_tags"] + encoded_inputs = processor( + images, + words, + boxes=boxes, + word_labels=word_labels, + max_length=512, + padding="max_length", + truncation=True, + return_overflowing_tokens=True, + stride=50, + return_offsets_mapping=True, + return_tensors="pt", + ) + return encoded_inputs + + train_data = preprocess_data(datasets["train"]) + + self.assertEqual(len(train_data["image"]), len(train_data["input_ids"])) + # different use cases tests @require_sentencepiece From 5727dfcebe63b0bcdfe06f1d4e2670cb329037e3 Mon Sep 17 00:00:00 2001 From: Dan Tegzes <48134725+Tegzes@users.noreply.github.com> Date: Tue, 30 Aug 2022 15:46:21 +0300 Subject: [PATCH 166/539] Added Docstrings for Deberta and DebertaV2 [PyTorch] (#18610) * Added Doctest for Deberta Pytorch * Added path in documentation test file * Added docstrings for DebertaV2 * Revert "Added docstrings for DebertaV2" This reverts commit 307185e62a21b3bd0923444cc8a8af1747fd2600. * Added DebertaV2 Docstrings --- .../models/deberta/modeling_deberta.py | 45 +++++++++++++++++-- .../models/deberta_v2/modeling_deberta_v2.py | 44 ++++++++++++++++-- src/transformers/utils/doc.py | 2 +- utils/documentation_tests.txt | 2 + 4 files changed, 84 insertions(+), 9 deletions(-) diff --git a/src/transformers/models/deberta/modeling_deberta.py b/src/transformers/models/deberta/modeling_deberta.py index 0fbb66ba8fd054..581d9d1f4f2d35 100644 --- a/src/transformers/models/deberta/modeling_deberta.py +++ b/src/transformers/models/deberta/modeling_deberta.py @@ -41,6 +41,32 @@ _TOKENIZER_FOR_DOC = "DebertaTokenizer" _CHECKPOINT_FOR_DOC = "microsoft/deberta-base" +# Masked LM docstring +_CHECKPOINT_FOR_MASKED_LM = "lsanochkin/deberta-large-feedback" +_MASKED_LM_EXPECTED_OUTPUT = "' Paris'" +_MASKED_LM_EXPECTED_LOSS = "0.54" + +# TokenClassification docstring +_CHECKPOINT_FOR_TOKEN_CLASSIFICATION = "dbsamu/deberta-base-finetuned-ner" +_TOKEN_CLASS_EXPECTED_OUTPUT = ( + "['LABEL_0', 'LABEL_0', 'LABEL_0', 'LABEL_0', 'LABEL_0', 'LABEL_0', 'LABEL_0', 'LABEL_0', 'LABEL_0', 'LABEL_0'," + " 'LABEL_0', 'LABEL_0']" +) +_TOKEN_CLASS_EXPECTED_LOSS = 0.04 + +# QuestionAnswering docstring +_CHECKPOINT_FOR_QA = "Palak/microsoft_deberta-large_squad" +_QA_EXPECTED_OUTPUT = "' a nice puppet'" +_QA_EXPECTED_LOSS = 0.14 +_QA_TARGET_START_INDEX = 12 +_QA_TARGET_END_INDEX = 14 + +# SequenceClassification docstring +_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "hf-internal-testing/tiny-random-deberta" +_SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_0'" +_SEQ_CLASS_EXPECTED_LOSS = "0.69" + + DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [ "microsoft/deberta-base", "microsoft/deberta-large", @@ -1032,9 +1058,12 @@ def set_output_embeddings(self, new_embeddings): @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, - checkpoint=_CHECKPOINT_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_MASKED_LM, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, + mask="[MASK]", + expected_output=_MASKED_LM_EXPECTED_OUTPUT, + expected_loss=_MASKED_LM_EXPECTED_LOSS, ) def forward( self, @@ -1173,9 +1202,11 @@ def set_input_embeddings(self, new_embeddings): @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, - checkpoint=_CHECKPOINT_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, + expected_output=_SEQ_CLASS_EXPECTED_OUTPUT, + expected_loss=_SEQ_CLASS_EXPECTED_LOSS, ) def forward( self, @@ -1281,9 +1312,11 @@ def __init__(self, config): @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, - checkpoint=_CHECKPOINT_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_TOKEN_CLASSIFICATION, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, + expected_output=_TOKEN_CLASS_EXPECTED_OUTPUT, + expected_loss=_TOKEN_CLASS_EXPECTED_LOSS, ) def forward( self, @@ -1356,9 +1389,13 @@ def __init__(self, config): @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, - checkpoint=_CHECKPOINT_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_QA, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, + expected_output=_QA_EXPECTED_OUTPUT, + expected_loss=_QA_EXPECTED_LOSS, + qa_target_start_index=_QA_TARGET_START_INDEX, + qa_target_end_index=_QA_TARGET_END_INDEX, ) def forward( self, diff --git a/src/transformers/models/deberta_v2/modeling_deberta_v2.py b/src/transformers/models/deberta_v2/modeling_deberta_v2.py index 1a9252a7d30707..46f7e00b96f21c 100644 --- a/src/transformers/models/deberta_v2/modeling_deberta_v2.py +++ b/src/transformers/models/deberta_v2/modeling_deberta_v2.py @@ -43,6 +43,31 @@ _TOKENIZER_FOR_DOC = "DebertaV2Tokenizer" _CHECKPOINT_FOR_DOC = "microsoft/deberta-v2-xlarge" +# Masked LM docstring +_CHECKPOINT_FOR_MASKED_LM = "hf-internal-testing/tiny-random-deberta-v2" +_MASKED_LM_EXPECTED_OUTPUT = "'enberry'" +_MASKED_LM_EXPECTED_LOSS = "11.85" + +# TokenClassification docstring +_CHECKPOINT_FOR_TOKEN_CLASSIFICATION = "hf-internal-testing/tiny-random-deberta-v2" +_TOKEN_CLASS_EXPECTED_OUTPUT = ( + "['LABEL_0', 'LABEL_0', 'LABEL_1', 'LABEL_0', 'LABEL_0', 'LABEL_1', 'LABEL_0', 'LABEL_0', 'LABEL_0', 'LABEL_0'," + " 'LABEL_0', 'LABEL_0']" +) +_TOKEN_CLASS_EXPECTED_LOSS = 0.61 + +# QuestionAnswering docstring +_CHECKPOINT_FOR_QA = "hf-internal-testing/tiny-random-deberta-v2" +_QA_EXPECTED_OUTPUT = "'was Jim Henson? Jim Henson was'" +_QA_EXPECTED_LOSS = 2.47 +_QA_TARGET_START_INDEX = 2 +_QA_TARGET_END_INDEX = 9 + +# SequenceClassification docstring +_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "hf-internal-testing/tiny-random-deberta-v2" +_SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_1'" +_SEQ_CLASS_EXPECTED_LOSS = "0.69" + DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = [ "microsoft/deberta-v2-xlarge", "microsoft/deberta-v2-xxlarge", @@ -1136,9 +1161,12 @@ def set_output_embeddings(self, new_embeddings): @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, - checkpoint=_CHECKPOINT_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_MASKED_LM, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, + mask="[MASK]", + expected_output=_MASKED_LM_EXPECTED_OUTPUT, + expected_loss=_MASKED_LM_EXPECTED_LOSS, ) def forward( self, @@ -1278,9 +1306,11 @@ def set_input_embeddings(self, new_embeddings): @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, - checkpoint=_CHECKPOINT_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, + expected_output=_SEQ_CLASS_EXPECTED_OUTPUT, + expected_loss=_SEQ_CLASS_EXPECTED_LOSS, ) def forward( self, @@ -1387,9 +1417,11 @@ def __init__(self, config): @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, - checkpoint=_CHECKPOINT_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_TOKEN_CLASSIFICATION, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, + expected_output=_TOKEN_CLASS_EXPECTED_OUTPUT, + expected_loss=_TOKEN_CLASS_EXPECTED_LOSS, ) def forward( self, @@ -1463,9 +1495,13 @@ def __init__(self, config): @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( processor_class=_TOKENIZER_FOR_DOC, - checkpoint=_CHECKPOINT_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_QA, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, + expected_output=_QA_EXPECTED_OUTPUT, + expected_loss=_QA_EXPECTED_LOSS, + qa_target_start_index=_QA_TARGET_START_INDEX, + qa_target_end_index=_QA_TARGET_END_INDEX, ) def forward( self, diff --git a/src/transformers/utils/doc.py b/src/transformers/utils/doc.py index 6761dec9c96905..9e3c7fce709487 100644 --- a/src/transformers/utils/doc.py +++ b/src/transformers/utils/doc.py @@ -242,7 +242,7 @@ def _prepare_output_docstrings(output_type, config_class, min_indent=None): >>> num_labels = len(model.config.id2label) >>> model = {model_class}.from_pretrained("{checkpoint}", num_labels=num_labels) - >>> labels = torch.tensor(1) + >>> labels = torch.tensor([1]) >>> loss = model(**inputs, labels=labels).loss >>> round(loss.item(), 2) {expected_loss} diff --git a/utils/documentation_tests.txt b/utils/documentation_tests.txt index b03dcf51173108..7545b4986078b2 100644 --- a/utils/documentation_tests.txt +++ b/utils/documentation_tests.txt @@ -26,6 +26,8 @@ src/transformers/models/ctrl/modeling_ctrl.py src/transformers/models/cvt/modeling_cvt.py src/transformers/models/data2vec/modeling_data2vec_audio.py src/transformers/models/data2vec/modeling_data2vec_vision.py +src/transformers/models/deberta/modeling_deberta.py +src/transformers/models/deberta_v2/modeling_deberta_v2.py src/transformers/models/deit/modeling_deit.py src/transformers/models/deit/modeling_tf_deit.py src/transformers/models/detr/modeling_detr.py From 73c6273d481f9052098a2a5a5f001fa75daaace9 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Tue, 30 Aug 2022 15:29:48 +0200 Subject: [PATCH 167/539] Improving the documentation for "word", within the pipeline. (#18763) * Improving the documentation for "word", within the pipeline. * Quality. --- src/transformers/pipelines/token_classification.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/transformers/pipelines/token_classification.py b/src/transformers/pipelines/token_classification.py index 04a80b32dd58cf..0dd91761899c24 100644 --- a/src/transformers/pipelines/token_classification.py +++ b/src/transformers/pipelines/token_classification.py @@ -172,7 +172,8 @@ def __call__(self, inputs: Union[str, List[str]], **kwargs): corresponding input, or each entity if this pipeline was instantiated with an aggregation_strategy) with the following keys: - - **word** (`str`) -- The token/word classified. + - **word** (`str`) -- The token/word classified. This is obtained by decoding the selected tokens. If you + want to have the exact string in the original sentence, use `start` and `stop`. - **score** (`float`) -- The corresponding probability for `entity`. - **entity** (`str`) -- The entity predicted for that token/word (it is named *entity_group* when *aggregation_strategy* is not `"none"`. From e88e9ff045347c9d92d85806a6987dc7ebcbdd5b Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Tue, 30 Aug 2022 18:33:09 +0200 Subject: [PATCH 168/539] Disable nightly CI temporarily (#18820) Co-authored-by: ydshieh --- .github/workflows/self-nightly-scheduled.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/self-nightly-scheduled.yml b/.github/workflows/self-nightly-scheduled.yml index 5dca2c07b1eaa1..371620c5438813 100644 --- a/.github/workflows/self-nightly-scheduled.yml +++ b/.github/workflows/self-nightly-scheduled.yml @@ -8,8 +8,9 @@ name: Self-hosted runner (nightly) on: repository_dispatch: - schedule: - - cron: "0 16 * * *" +# Disable temporarily until the test suite can be run under 12 hours. +# schedule: +# - cron: "0 16 * * *" env: HF_HOME: /mnt/cache From 5c4c869014f5839d04c1fd28133045df0c91fd84 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Wed, 31 Aug 2022 01:05:33 -0700 Subject: [PATCH 169/539] Add LayoutLMForQuestionAnswering model (#18407) * Add LayoutLMForQuestionAnswering model * Fix output * Remove TF TODOs * Add test cases * Add docs * TF implementation * Fix PT/TF equivalence * Fix loss * make fixup * Fix up documentation code examples * Fix up documentation examples + test them * Remove LayoutLMForQuestionAnswering from the auto mapping * Docstrings * Add better docstrings * Undo whitespace changes * Update tokenizers in comments * Fixup code and remove `from_pt=True` * Fix tests * Revert some unexpected docstring changes * Fix tests by overriding _prepare_for_class Co-authored-by: Ankur Goyal --- docs/source/en/model_doc/layoutlm.mdx | 8 + src/transformers/__init__.py | 4 + src/transformers/models/layoutlm/__init__.py | 4 + .../models/layoutlm/modeling_layoutlm.py | 162 ++++++++++++++++- .../models/layoutlm/modeling_tf_layoutlm.py | 166 +++++++++++++++++- src/transformers/utils/dummy_pt_objects.py | 7 + src/transformers/utils/dummy_tf_objects.py | 7 + src/transformers/utils/fx.py | 2 + .../models/layoutlm/test_modeling_layoutlm.py | 68 ++++++- .../layoutlm/test_modeling_tf_layoutlm.py | 62 ++++++- utils/check_repo.py | 2 + utils/documentation_tests.txt | 2 + 12 files changed, 474 insertions(+), 20 deletions(-) diff --git a/docs/source/en/model_doc/layoutlm.mdx b/docs/source/en/model_doc/layoutlm.mdx index b1ee2a8cdbbc52..e463c67d91a03a 100644 --- a/docs/source/en/model_doc/layoutlm.mdx +++ b/docs/source/en/model_doc/layoutlm.mdx @@ -107,6 +107,10 @@ This model was contributed by [liminghao1630](https://huggingface.co/liminghao16 [[autodoc]] LayoutLMForTokenClassification +## LayoutLMForQuestionAnswering + +[[autodoc]] LayoutLMForQuestionAnswering + ## TFLayoutLMModel [[autodoc]] TFLayoutLMModel @@ -122,3 +126,7 @@ This model was contributed by [liminghao1630](https://huggingface.co/liminghao16 ## TFLayoutLMForTokenClassification [[autodoc]] TFLayoutLMForTokenClassification + +## TFLayoutLMForQuestionAnswering + +[[autodoc]] TFLayoutLMForQuestionAnswering diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index bb64fe9295dad7..aff905b97ec5d7 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -1305,6 +1305,7 @@ "LayoutLMForMaskedLM", "LayoutLMForSequenceClassification", "LayoutLMForTokenClassification", + "LayoutLMForQuestionAnswering", "LayoutLMModel", "LayoutLMPreTrainedModel", ] @@ -2337,6 +2338,7 @@ "TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFLayoutLMForMaskedLM", "TFLayoutLMForSequenceClassification", + "TFLayoutLMForQuestionAnswering", "TFLayoutLMForTokenClassification", "TFLayoutLMMainLayer", "TFLayoutLMModel", @@ -3945,6 +3947,7 @@ from .models.layoutlm import ( LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMForMaskedLM, + LayoutLMForQuestionAnswering, LayoutLMForSequenceClassification, LayoutLMForTokenClassification, LayoutLMModel, @@ -4583,6 +4586,7 @@ from .modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, + TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMMainLayer, diff --git a/src/transformers/models/layoutlm/__init__.py b/src/transformers/models/layoutlm/__init__.py index a7ccae38e89e19..cbedd3868d75f5 100644 --- a/src/transformers/models/layoutlm/__init__.py +++ b/src/transformers/models/layoutlm/__init__.py @@ -51,6 +51,7 @@ "LayoutLMForMaskedLM", "LayoutLMForSequenceClassification", "LayoutLMForTokenClassification", + "LayoutLMForQuestionAnswering", "LayoutLMModel", "LayoutLMPreTrainedModel", ] @@ -66,6 +67,7 @@ "TFLayoutLMForMaskedLM", "TFLayoutLMForSequenceClassification", "TFLayoutLMForTokenClassification", + "TFLayoutLMForQuestionAnswering", "TFLayoutLMMainLayer", "TFLayoutLMModel", "TFLayoutLMPreTrainedModel", @@ -93,6 +95,7 @@ from .modeling_layoutlm import ( LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMForMaskedLM, + LayoutLMForQuestionAnswering, LayoutLMForSequenceClassification, LayoutLMForTokenClassification, LayoutLMModel, @@ -107,6 +110,7 @@ from .modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, + TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMMainLayer, diff --git a/src/transformers/models/layoutlm/modeling_layoutlm.py b/src/transformers/models/layoutlm/modeling_layoutlm.py index e3a625416a7d1b..0b1970210e57bc 100644 --- a/src/transformers/models/layoutlm/modeling_layoutlm.py +++ b/src/transformers/models/layoutlm/modeling_layoutlm.py @@ -28,6 +28,7 @@ BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, MaskedLMOutput, + QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) @@ -40,7 +41,6 @@ logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "LayoutLMConfig" -_TOKENIZER_FOR_DOC = "LayoutLMTokenizer" _CHECKPOINT_FOR_DOC = "microsoft/layoutlm-base-uncased" LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = [ @@ -749,10 +749,10 @@ def forward( Examples: ```python - >>> from transformers import LayoutLMTokenizer, LayoutLMModel + >>> from transformers import AutoTokenizer, LayoutLMModel >>> import torch - >>> tokenizer = LayoutLMTokenizer.from_pretrained("microsoft/layoutlm-base-uncased") + >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlm-base-uncased") >>> model = LayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased") >>> words = ["Hello", "world"] @@ -896,10 +896,10 @@ def forward( Examples: ```python - >>> from transformers import LayoutLMTokenizer, LayoutLMForMaskedLM + >>> from transformers import AutoTokenizer, LayoutLMForMaskedLM >>> import torch - >>> tokenizer = LayoutLMTokenizer.from_pretrained("microsoft/layoutlm-base-uncased") + >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlm-base-uncased") >>> model = LayoutLMForMaskedLM.from_pretrained("microsoft/layoutlm-base-uncased") >>> words = ["Hello", "[MASK]"] @@ -1018,10 +1018,10 @@ def forward( Examples: ```python - >>> from transformers import LayoutLMTokenizer, LayoutLMForSequenceClassification + >>> from transformers import AutoTokenizer, LayoutLMForSequenceClassification >>> import torch - >>> tokenizer = LayoutLMTokenizer.from_pretrained("microsoft/layoutlm-base-uncased") + >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlm-base-uncased") >>> model = LayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased") >>> words = ["Hello", "world"] @@ -1153,10 +1153,10 @@ def forward( Examples: ```python - >>> from transformers import LayoutLMTokenizer, LayoutLMForTokenClassification + >>> from transformers import AutoTokenizer, LayoutLMForTokenClassification >>> import torch - >>> tokenizer = LayoutLMTokenizer.from_pretrained("microsoft/layoutlm-base-uncased") + >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlm-base-uncased") >>> model = LayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased") >>> words = ["Hello", "world"] @@ -1222,3 +1222,147 @@ def forward( hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) + + +@add_start_docstrings( + """ + LayoutLM Model with a span classification head on top for extractive question-answering tasks such as + [DocVQA](https://rrc.cvc.uab.es/?ch=17) (a linear layer on top of the final hidden-states output to compute `span + start logits` and `span end logits`). + """, + LAYOUTLM_START_DOCSTRING, +) +class LayoutLMForQuestionAnswering(LayoutLMPreTrainedModel): + def __init__(self, config, has_visual_segment_embedding=True): + super().__init__(config) + self.num_labels = config.num_labels + + self.layoutlm = LayoutLMModel(config) + self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.layoutlm.embeddings.word_embeddings + + @replace_return_docstrings(output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + bbox: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + start_positions: Optional[torch.LongTensor] = None, + end_positions: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, QuestionAnsweringModelOutput]: + r""" + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + + Returns: + + Example: + + In the example below, we prepare a question + context pair for the LayoutLM model. It will give us a prediction + of what it thinks the answer is (the span of the answer within the texts parsed from the image). + + ```python + >>> from transformers import AutoTokenizer, LayoutLMForQuestionAnswering + >>> from datasets import load_dataset + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("impira/layoutlm-document-qa", add_prefix_space=True) + >>> model = LayoutLMForQuestionAnswering.from_pretrained("impira/layoutlm-document-qa") + + >>> dataset = load_dataset("nielsr/funsd", split="train") + >>> example = dataset[0] + >>> question = "what's his name?" + >>> words = example["words"] + >>> boxes = example["bboxes"] + + >>> encoding = tokenizer( + ... question.split(), words, is_split_into_words=True, return_token_type_ids=True, return_tensors="pt" + ... ) + >>> bbox = [] + >>> for i, s, w in zip(encoding.input_ids[0], encoding.sequence_ids(0), encoding.word_ids(0)): + ... if s == 1: + ... bbox.append(boxes[w]) + ... elif i == tokenizer.sep_token_id: + ... bbox.append([1000] * 4) + ... else: + ... bbox.append([0] * 4) + >>> encoding["bbox"] = torch.tensor([bbox]) + + >>> word_ids = encoding.word_ids(0) + >>> outputs = model(**encoding) + >>> loss = outputs.loss + >>> start_scores = outputs.start_logits + >>> end_scores = outputs.end_logits + >>> start, end = word_ids[start_scores.argmax(-1)], word_ids[end_scores.argmax(-1)] + >>> print(" ".join(words[start : end + 1])) + M. Hamann P. Harper, P. Martinez + ```""" + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.layoutlm( + input_ids=input_ids, + bbox=bbox, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1).contiguous() + end_logits = end_logits.squeeze(-1).contiguous() + + total_loss = None + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions = start_positions.clamp(0, ignored_index) + end_positions = end_positions.clamp(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if not return_dict: + output = (start_logits, end_logits) + outputs[2:] + return ((total_loss,) + output) if total_loss is not None else output + + return QuestionAnsweringModelOutput( + loss=total_loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/src/transformers/models/layoutlm/modeling_tf_layoutlm.py b/src/transformers/models/layoutlm/modeling_tf_layoutlm.py index d15fc29b7366d1..8c28651ab8f72c 100644 --- a/src/transformers/models/layoutlm/modeling_tf_layoutlm.py +++ b/src/transformers/models/layoutlm/modeling_tf_layoutlm.py @@ -26,6 +26,7 @@ TFBaseModelOutputWithPastAndCrossAttentions, TFBaseModelOutputWithPoolingAndCrossAttentions, TFMaskedLMOutput, + TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput, ) @@ -33,6 +34,7 @@ TFMaskedLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, + TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFTokenClassificationLoss, get_initializer, @@ -47,7 +49,6 @@ logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "LayoutLMConfig" -_TOKENIZER_FOR_DOC = "LayoutLMTokenizer" TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = [ "microsoft/layoutlm-base-uncased", @@ -934,10 +935,10 @@ def call( Examples: ```python - >>> from transformers import LayoutLMTokenizer, TFLayoutLMModel + >>> from transformers import AutoTokenizer, TFLayoutLMModel >>> import tensorflow as tf - >>> tokenizer = LayoutLMTokenizer.from_pretrained("microsoft/layoutlm-base-uncased") + >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlm-base-uncased") >>> model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased") >>> words = ["Hello", "world"] @@ -1058,10 +1059,10 @@ def call( Examples: ```python - >>> from transformers import LayoutLMTokenizer, TFLayoutLMForMaskedLM + >>> from transformers import AutoTokenizer, TFLayoutLMForMaskedLM >>> import tensorflow as tf - >>> tokenizer = LayoutLMTokenizer.from_pretrained("microsoft/layoutlm-base-uncased") + >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlm-base-uncased") >>> model = TFLayoutLMForMaskedLM.from_pretrained("microsoft/layoutlm-base-uncased") >>> words = ["Hello", "[MASK]"] @@ -1181,10 +1182,10 @@ def call( Examples: ```python - >>> from transformers import LayoutLMTokenizer, TFLayoutLMForSequenceClassification + >>> from transformers import AutoTokenizer, TFLayoutLMForSequenceClassification >>> import tensorflow as tf - >>> tokenizer = LayoutLMTokenizer.from_pretrained("microsoft/layoutlm-base-uncased") + >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlm-base-uncased") >>> model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased") >>> words = ["Hello", "world"] @@ -1310,9 +1311,9 @@ def call( ```python >>> import tensorflow as tf - >>> from transformers import LayoutLMTokenizer, TFLayoutLMForTokenClassification + >>> from transformers import AutoTokenizer, TFLayoutLMForTokenClassification - >>> tokenizer = LayoutLMTokenizer.from_pretrained("microsoft/layoutlm-base-uncased") + >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/layoutlm-base-uncased") >>> model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased") >>> words = ["Hello", "world"] @@ -1377,3 +1378,150 @@ def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOu attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns) + + +@add_start_docstrings( + """ + LayoutLM Model with a span classification head on top for extractive question-answering tasks such as + [DocVQA](https://rrc.cvc.uab.es/?ch=17) (a linear layer on top of the final hidden-states output to compute `span + start logits` and `span end logits`). + """, + LAYOUTLM_START_DOCSTRING, +) +class TFLayoutLMForQuestionAnswering(TFLayoutLMPreTrainedModel, TFQuestionAnsweringLoss): + # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model + _keys_to_ignore_on_load_unexpected = [ + r"pooler", + r"mlm___cls", + r"nsp___cls", + r"cls.predictions", + r"cls.seq_relationship", + ] + + def __init__(self, config: LayoutLMConfig, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + self.num_labels = config.num_labels + + self.layoutlm = TFLayoutLMMainLayer(config, add_pooling_layer=True, name="layoutlm") + self.qa_outputs = tf.keras.layers.Dense( + units=config.num_labels, + kernel_initializer=get_initializer(config.initializer_range), + name="qa_outputs", + ) + + @unpack_inputs + @add_start_docstrings_to_model_forward(LAYOUTLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @replace_return_docstrings(output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) + def call( + self, + input_ids: Optional[TFModelInputType] = None, + bbox: Optional[Union[np.ndarray, tf.Tensor]] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + start_positions: Optional[Union[np.ndarray, tf.Tensor]] = None, + end_positions: Optional[Union[np.ndarray, tf.Tensor]] = None, + training: Optional[bool] = False, + ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]: + r""" + start_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + end_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + + Returns: + + Examples: + + ```python + >>> import tensorflow as tf + >>> from transformers import AutoTokenizer, TFLayoutLMForQuestionAnswering + >>> from datasets import load_dataset + + >>> tokenizer = AutoTokenizer.from_pretrained("impira/layoutlm-document-qa", add_prefix_space=True) + >>> model = TFLayoutLMForQuestionAnswering.from_pretrained("impira/layoutlm-document-qa") + + >>> dataset = load_dataset("nielsr/funsd", split="train") + >>> example = dataset[0] + >>> question = "what's his name?" + >>> words = example["words"] + >>> boxes = example["bboxes"] + + >>> encoding = tokenizer( + ... question.split(), words, is_split_into_words=True, return_token_type_ids=True, return_tensors="tf" + ... ) + >>> bbox = [] + >>> for i, s, w in zip(encoding.input_ids[0], encoding.sequence_ids(0), encoding.word_ids(0)): + ... if s == 1: + ... bbox.append(boxes[w]) + ... elif i == tokenizer.sep_token_id: + ... bbox.append([1000] * 4) + ... else: + ... bbox.append([0] * 4) + >>> encoding["bbox"] = tf.convert_to_tensor([bbox]) + + >>> word_ids = encoding.word_ids(0) + >>> outputs = model(**encoding) + >>> loss = outputs.loss + >>> start_scores = outputs.start_logits + >>> end_scores = outputs.end_logits + >>> start, end = word_ids[tf.math.argmax(start_scores, -1)[0]], word_ids[tf.math.argmax(end_scores, -1)[0]] + >>> print(" ".join(words[start : end + 1])) + M. Hamann P. Harper, P. Martinez + ```""" + + outputs = self.layoutlm( + input_ids=input_ids, + bbox=bbox, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + sequence_output = outputs[0] + + logits = self.qa_outputs(inputs=sequence_output) + start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1) + start_logits = tf.squeeze(input=start_logits, axis=-1) + end_logits = tf.squeeze(input=end_logits, axis=-1) + loss = None + + if start_positions is not None and end_positions is not None: + labels = {"start_position": start_positions} + labels["end_position"] = end_positions + loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits)) + + if not return_dict: + output = (start_logits, end_logits) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TFQuestionAnsweringModelOutput( + loss=loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput: + hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None + attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None + + return TFQuestionAnsweringModelOutput( + start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns + ) diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index 96a93ecae942a7..9c0db79e44b8df 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -2469,6 +2469,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class LayoutLMForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class LayoutLMForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] diff --git a/src/transformers/utils/dummy_tf_objects.py b/src/transformers/utils/dummy_tf_objects.py index e77a414cdce437..e09d26aec5c96b 100644 --- a/src/transformers/utils/dummy_tf_objects.py +++ b/src/transformers/utils/dummy_tf_objects.py @@ -129,6 +129,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) +class TFLayoutLMForQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + class TFLayoutLMForSequenceClassification(metaclass=DummyObject): _backends = ["tf"] diff --git a/src/transformers/utils/fx.py b/src/transformers/utils/fx.py index 3c3babd4037780..aec3c950ae435a 100644 --- a/src/transformers/utils/fx.py +++ b/src/transformers/utils/fx.py @@ -147,6 +147,7 @@ def _generate_supported_model_class_names( "GPT2DoubleHeadsModel", "Speech2Text2Decoder", "TrOCRDecoder", + "LayoutLMForQuestionAnswering", # TODO: add support for them as it should be quite easy to do so (small blocking issues). # XLNetForQuestionAnswering, ] @@ -690,6 +691,7 @@ def _generate_dummy_input( inputs_dict["labels"] = torch.zeros(batch_size, dtype=torch.long, device=device) elif model_class_name in [ *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES), + "LayoutLMForQuestionAnswering", "XLNetForQuestionAnswering", ]: inputs_dict["start_positions"] = torch.zeros(batch_size, dtype=torch.long, device=device) diff --git a/tests/models/layoutlm/test_modeling_layoutlm.py b/tests/models/layoutlm/test_modeling_layoutlm.py index e2d949611d78e8..cce3c9b3f48615 100644 --- a/tests/models/layoutlm/test_modeling_layoutlm.py +++ b/tests/models/layoutlm/test_modeling_layoutlm.py @@ -13,10 +13,11 @@ # See the License for the specific language governing permissions and # limitations under the License. - +import copy import unittest from transformers import LayoutLMConfig, is_torch_available +from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester @@ -27,7 +28,11 @@ import torch from transformers import ( + MODEL_FOR_MASKED_LM_MAPPING, + MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMForMaskedLM, + LayoutLMForQuestionAnswering, LayoutLMForSequenceClassification, LayoutLMForTokenClassification, LayoutLMModel, @@ -181,6 +186,23 @@ def create_and_check_for_token_classification( result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) + def create_and_check_for_question_answering( + self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels + ): + model = LayoutLMForQuestionAnswering(config=config) + model.to(torch_device) + model.eval() + result = model( + input_ids, + bbox=bbox, + attention_mask=input_mask, + token_type_ids=token_type_ids, + start_positions=sequence_labels, + end_positions=sequence_labels, + ) + self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) + self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) + def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( @@ -211,6 +233,7 @@ class LayoutLMModelTest(ModelTesterMixin, unittest.TestCase): LayoutLMForMaskedLM, LayoutLMForSequenceClassification, LayoutLMForTokenClassification, + LayoutLMForQuestionAnswering, ) if is_torch_available() else None @@ -246,6 +269,34 @@ def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) + def test_for_question_answering(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_for_question_answering(*config_and_inputs) + + def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): + inputs_dict = copy.deepcopy(inputs_dict) + if return_labels: + if model_class in get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING): + inputs_dict["labels"] = torch.zeros( + self.model_tester.batch_size, dtype=torch.long, device=torch_device + ) + elif model_class in [ + *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING), + *get_values(MODEL_FOR_MASKED_LM_MAPPING), + ]: + inputs_dict["labels"] = torch.zeros( + (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device + ) + elif model_class.__name__ == "LayoutLMForQuestionAnswering": + inputs_dict["start_positions"] = torch.zeros( + self.model_tester.batch_size, dtype=torch.long, device=torch_device + ) + inputs_dict["end_positions"] = torch.zeros( + self.model_tester.batch_size, dtype=torch.long, device=torch_device + ) + + return inputs_dict + def prepare_layoutlm_batch_inputs(): # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: @@ -337,3 +388,18 @@ def test_forward_pass_token_classification(self): logits = outputs.logits expected_shape = torch.Size((2, 25, 13)) self.assertEqual(logits.shape, expected_shape) + + @slow + def test_forward_pass_question_answering(self): + # initialize model with randomly initialized token classification head + model = LayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased").to(torch_device) + + input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs() + + # forward pass + outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids) + + # test the shape of the logits + expected_shape = torch.Size((2, 25)) + self.assertEqual(outputs.start_logits.shape, expected_shape) + self.assertEqual(outputs.end_logits.shape, expected_shape) diff --git a/tests/models/layoutlm/test_modeling_tf_layoutlm.py b/tests/models/layoutlm/test_modeling_tf_layoutlm.py index fb230aab56e820..9323b0bb9b97d2 100644 --- a/tests/models/layoutlm/test_modeling_tf_layoutlm.py +++ b/tests/models/layoutlm/test_modeling_tf_layoutlm.py @@ -13,11 +13,13 @@ # See the License for the specific language governing permissions and # limitations under the License. +import copy import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available +from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester @@ -27,9 +29,15 @@ if is_tf_available(): import tensorflow as tf + from transformers import ( + TF_MODEL_FOR_MASKED_LM_MAPPING, + TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, + TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, + ) from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, + TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, @@ -174,6 +182,15 @@ def create_and_check_for_token_classification( result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) + def create_and_check_for_question_answering( + self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels + ): + model = TFLayoutLMForQuestionAnswering(config=config) + + result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids) + self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) + self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) + def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( @@ -199,7 +216,13 @@ def prepare_config_and_inputs_for_common(self): class TFLayoutLMModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = ( - (TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification) + ( + TFLayoutLMModel, + TFLayoutLMForMaskedLM, + TFLayoutLMForTokenClassification, + TFLayoutLMForSequenceClassification, + TFLayoutLMForQuestionAnswering, + ) if is_tf_available() else () ) @@ -230,12 +253,34 @@ def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) + def test_for_question_answering(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_for_question_answering(*config_and_inputs) + @slow def test_model_from_pretrained(self): for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFLayoutLMModel.from_pretrained(model_name) self.assertIsNotNone(model) + def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): + inputs_dict = copy.deepcopy(inputs_dict) + if return_labels: + if model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING): + inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) + elif model_class in [ + *get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING), + *get_values(TF_MODEL_FOR_MASKED_LM_MAPPING), + ]: + inputs_dict["labels"] = tf.zeros( + (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32 + ) + elif model_class.__name__ == "TFLayoutLMForQuestionAnswering": + inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) + inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) + + return inputs_dict + def prepare_layoutlm_batch_inputs(): # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: @@ -316,3 +361,18 @@ def test_forward_pass_token_classification(self): logits = outputs.logits expected_shape = tf.convert_to_tensor((2, 25, 13)) self.assertEqual(logits.shape, expected_shape) + + @slow + def test_forward_pass_question_answering(self): + # initialize model with randomly initialized token classification head + model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased") + + input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs() + + # forward pass + outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids) + + # test the shape of the logits + expected_shape = tf.convert_to_tensor((2, 25)) + self.assertEqual(outputs.start_logits.shape, expected_shape) + self.assertEqual(outputs.end_logits.shape, expected_shape) diff --git a/utils/check_repo.py b/utils/check_repo.py index 1c5f3709530d94..c3ecfbebe48ceb 100644 --- a/utils/check_repo.py +++ b/utils/check_repo.py @@ -161,6 +161,7 @@ "FlavaImageModel", "FlavaMultimodalModel", "GPT2DoubleHeadsModel", + "LayoutLMForQuestionAnswering", "LukeForMaskedLM", "LukeForEntityClassification", "LukeForEntityPairClassification", @@ -178,6 +179,7 @@ "RealmReader", "TFDPRReader", "TFGPT2DoubleHeadsModel", + "TFLayoutLMForQuestionAnswering", "TFOpenAIGPTDoubleHeadsModel", "TFRagModel", "TFRagSequenceForGeneration", diff --git a/utils/documentation_tests.txt b/utils/documentation_tests.txt index 7545b4986078b2..c8ef3a07f7a6e8 100644 --- a/utils/documentation_tests.txt +++ b/utils/documentation_tests.txt @@ -38,6 +38,8 @@ src/transformers/models/glpn/modeling_glpn.py src/transformers/models/gpt2/modeling_gpt2.py src/transformers/models/gptj/modeling_gptj.py src/transformers/models/hubert/modeling_hubert.py +src/transformers/models/layoutlm/modeling_layoutlm.py +src/transformers/models/layoutlm/modeling_tf_layoutlm.py src/transformers/models/layoutlmv2/modeling_layoutlmv2.py src/transformers/models/layoutlmv3/modeling_layoutlmv3.py src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py From fea4636cfaccb27fabc0c1d742e7667b17fcc8cd Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Wed, 31 Aug 2022 09:07:53 +0100 Subject: [PATCH 170/539] Pin max tf version (#18818) --- setup.py | 4 ++-- src/transformers/dependency_versions_table.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/setup.py b/setup.py index 05ec2c7617fd98..e6f442e5ec23e3 100644 --- a/setup.py +++ b/setup.py @@ -154,8 +154,8 @@ "sigopt", "librosa", "starlette", - "tensorflow-cpu>=2.3", - "tensorflow>=2.3", + "tensorflow-cpu>=2.3,<2.10", + "tensorflow>=2.3,<2.10", "tensorflow-text", "tf2onnx", "timeout-decorator", diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py index be3dba684bd58d..fec2fc9203240d 100644 --- a/src/transformers/dependency_versions_table.py +++ b/src/transformers/dependency_versions_table.py @@ -60,8 +60,8 @@ "sigopt": "sigopt", "librosa": "librosa", "starlette": "starlette", - "tensorflow-cpu": "tensorflow-cpu>=2.3", - "tensorflow": "tensorflow>=2.3", + "tensorflow-cpu": "tensorflow-cpu>=2.3,<2.10", + "tensorflow": "tensorflow>=2.3,<2.10", "tensorflow-text": "tensorflow-text", "tf2onnx": "tf2onnx", "timeout-decorator": "timeout-decorator", From c3be98ebab39aac7496bfa494bfa924dac0e8600 Mon Sep 17 00:00:00 2001 From: Peter Jung Date: Wed, 31 Aug 2022 14:28:58 +0200 Subject: [PATCH 171/539] Fix cost condition in DetrHungarianMatcher and YolosHungarianMatcher to allow zero-cost (#18647) * Fix loss condition in DetrHungarianMatcher * Fix costs condition in YolosHungarianMatcher --- src/transformers/models/detr/modeling_detr.py | 2 +- src/transformers/models/yolos/modeling_yolos.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/transformers/models/detr/modeling_detr.py b/src/transformers/models/detr/modeling_detr.py index 67adaf6265dba8..deaab873fc493a 100644 --- a/src/transformers/models/detr/modeling_detr.py +++ b/src/transformers/models/detr/modeling_detr.py @@ -2153,7 +2153,7 @@ def __init__(self, class_cost: float = 1, bbox_cost: float = 1, giou_cost: float self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost - if class_cost == 0 or bbox_cost == 0 or giou_cost == 0: + if class_cost == 0 and bbox_cost == 0 and giou_cost == 0: raise ValueError("All costs of the Matcher can't be 0") @torch.no_grad() diff --git a/src/transformers/models/yolos/modeling_yolos.py b/src/transformers/models/yolos/modeling_yolos.py index 447cec23de97fe..db70670b6c0334 100755 --- a/src/transformers/models/yolos/modeling_yolos.py +++ b/src/transformers/models/yolos/modeling_yolos.py @@ -1145,7 +1145,7 @@ def __init__(self, class_cost: float = 1, bbox_cost: float = 1, giou_cost: float self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost - if class_cost == 0 or bbox_cost == 0 or giou_cost == 0: + if class_cost == 0 and bbox_cost == 0 and giou_cost == 0: raise ValueError("All costs of the Matcher can't be 0") @torch.no_grad() From cdde85a0a00955354e22638cfb2a73164a4e2496 Mon Sep 17 00:00:00 2001 From: "Wang, Yi" Date: Wed, 31 Aug 2022 20:35:10 +0800 Subject: [PATCH 172/539] oob performance improvement for cpu DDP (#18595) * oob performance improvement for cpu DDP Signed-off-by: Wang, Yi A * add is_psutil_available check Signed-off-by: Wang, Yi A Signed-off-by: Wang, Yi A --- src/transformers/training_args.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index 7a23281d82ee21..8e6d91084c208a 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -39,6 +39,7 @@ ccl_version, get_full_repo_name, is_accelerate_available, + is_psutil_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_torch_available, @@ -1342,6 +1343,21 @@ def _setup_devices(self) -> "torch.device": "Looks like distributed multinode run but MASTER_ADDR env not set, " "please try exporting rank 0's hostname as MASTER_ADDR" ) + if ( + torch.get_num_threads() == 1 + and get_int_from_env(["OMP_NUM_THREADS", "MKL_NUM_THREADS"], 0) == 0 + and is_psutil_available() + ): + import psutil + + num_cpu_threads_per_process = int(psutil.cpu_count(logical=False) / local_size) + if num_cpu_threads_per_process == 0: + num_cpu_threads_per_process = 1 + torch.set_num_threads(num_cpu_threads_per_process) + logger.info( + f"num_cpu_threads_per_process unset, we set it at {num_cpu_threads_per_process} to improve oob" + " performance." + ) torch.distributed.init_process_group(backend=self.xpu_backend, rank=rank, world_size=size) elif is_torch_tpu_available(): device = xm.xla_device() From e4910213be9218fd8e3e6414c7feec7d1876229b Mon Sep 17 00:00:00 2001 From: Zachary Mueller Date: Wed, 31 Aug 2022 08:46:31 -0400 Subject: [PATCH 173/539] Warn on TPUs when the custom optimizer and model device are not the same (#18668) * Check optimizer for device on TPU * Typo --- src/transformers/trainer.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index 0fe0c1b84ee7f2..c1eab71fff03db 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -465,6 +465,21 @@ def __init__( "Passing a `model_init` is incompatible with providing the `optimizers` argument. " "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method." ) + if is_torch_tpu_available() and self.optimizer is not None: + for param in self.model.parameters(): + model_device = param.device + break + for param_group in self.optimizer.param_groups: + if len(param_group["params"]) > 0: + optimizer_device = param_group["params"][0].device + break + if model_device != optimizer_device: + raise ValueError( + "The model and the optimizer parameters are not on the same device, which probably means you" + " created an optimizer around your model **before** putting on the device and passing it to the" + " `Trainer`. Make sure the lines `import torch_xla.core.xla_model as xm` and" + " `model.to(xm.xla_device())` is performed before the optimizer creation in your script." + ) if ((self.sharded_ddp is not None) or args.deepspeed or (self.fsdp is not None)) and ( self.optimizer is not None or self.lr_scheduler is not None ): From ee407024c458c83bc8832a76689029663f6538a7 Mon Sep 17 00:00:00 2001 From: Lysandre Debut Date: Wed, 31 Aug 2022 15:10:25 +0200 Subject: [PATCH 174/539] Update location identification (#18834) --- .github/workflows/add-model-like.yml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/.github/workflows/add-model-like.yml b/.github/workflows/add-model-like.yml index 2d2ab5b2e15b65..7b6e96bb874aa0 100644 --- a/.github/workflows/add-model-like.yml +++ b/.github/workflows/add-model-like.yml @@ -41,10 +41,12 @@ jobs: run: | . ~/venv/bin/activate python setup.py develop - transformer_loc=$(pip show transformers | grep "Location: " | cut -c11-) - transformer_repo_loc=$(pwd .) - if [ "$transformer_loc" != "$transformer_repo_loc/src" ]; then - echo "transformers is from $transformer_loc but it shoud be from $transformer_repo_loc/src." + transformers_install=$(pip list -e | grep transformers) + transformers_install_array=($transformers_install) + transformers_loc=${transformers_install_array[-1]} + transformers_repo_loc=$(pwd .) + if [ "$transformers_loc" != "$transformers_repo_loc" ]; then + echo "transformers is from $transformers_loc but it shoud be from $transformers_repo_loc/src." echo "A fix is required. Stop testing." exit 1 fi From 811c4c9f79758235762b4f70ffae00deae494fb1 Mon Sep 17 00:00:00 2001 From: Shu Takayama Date: Wed, 31 Aug 2022 23:37:18 +0900 Subject: [PATCH 175/539] fix bug: register_for_auto_class should be defined on TFPreTrainedModel instead of TFSequenceSummary (#18607) --- src/transformers/modeling_tf_utils.py | 52 +++++++++++++-------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/src/transformers/modeling_tf_utils.py b/src/transformers/modeling_tf_utils.py index 3587354b9326a9..484417f7ad33c5 100644 --- a/src/transformers/modeling_tf_utils.py +++ b/src/transformers/modeling_tf_utils.py @@ -2541,6 +2541,32 @@ def push_to_hub( work_dir, repo_id, files_timestamps, commit_message=commit_message, token=token ) + @classmethod + def register_for_auto_class(cls, auto_class="TFAutoModel"): + """ + Register this class with a given auto class. This should only be used for custom models as the ones in the + library are already mapped with an auto class. + + + + This API is experimental and may have some slight breaking changes in the next releases. + + + + Args: + auto_class (`str` or `type`, *optional*, defaults to `"TFAutoModel"`): + The auto class to register this new model with. + """ + if not isinstance(auto_class, str): + auto_class = auto_class.__name__ + + import transformers.models.auto as auto_module + + if not hasattr(auto_module, auto_class): + raise ValueError(f"{auto_class} is not a valid auto class.") + + cls._auto_class = auto_class + class TFConv1D(tf.keras.layers.Layer): """ @@ -2795,32 +2821,6 @@ def call(self, inputs, cls_index=None, training=False): return output - @classmethod - def register_for_auto_class(cls, auto_class="TFAutoModel"): - """ - Register this class with a given auto class. This should only be used for custom models as the ones in the - library are already mapped with an auto class. - - - - This API is experimental and may have some slight breaking changes in the next releases. - - - - Args: - auto_class (`str` or `type`, *optional*, defaults to `"TFAutoModel"`): - The auto class to register this new model with. - """ - if not isinstance(auto_class, str): - auto_class = auto_class.__name__ - - import transformers.models.auto as auto_module - - if not hasattr(auto_module, auto_class): - raise ValueError(f"{auto_class} is not a valid auto class.") - - cls._auto_class = auto_class - def get_initializer(initializer_range: float = 0.02) -> tf.initializers.TruncatedNormal: """ From 3b6943e7a3f39aa3108b9a08729a454e10d12ee4 Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Wed, 31 Aug 2022 18:04:42 +0200 Subject: [PATCH 176/539] [DETR] Add num_channels attribute (#18714) * Add num_channels attribute * Fix code quality Co-authored-by: Niels Rogge --- .../models/detr/configuration_detr.py | 5 ++++- src/transformers/models/detr/modeling_detr.py | 13 +++++++++--- tests/models/detr/test_modeling_detr.py | 20 +++++++++++++++++++ 3 files changed, 34 insertions(+), 4 deletions(-) diff --git a/src/transformers/models/detr/configuration_detr.py b/src/transformers/models/detr/configuration_detr.py index fb320afdd2014e..604a7dad0f42e9 100644 --- a/src/transformers/models/detr/configuration_detr.py +++ b/src/transformers/models/detr/configuration_detr.py @@ -42,8 +42,9 @@ class DetrConfig(PretrainedConfig): Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. - Args: + num_channels (`int`, *optional*, defaults to 3): + The number of input channels. num_queries (`int`, *optional*, defaults to 100): Number of object queries, i.e. detection slots. This is the maximal number of objects [`DetrModel`] can detect in a single image. For COCO, we recommend 100 queries. @@ -132,6 +133,7 @@ class DetrConfig(PretrainedConfig): def __init__( self, + num_channels=3, num_queries=100, max_position_embeddings=1024, encoder_layers=6, @@ -167,6 +169,7 @@ def __init__( eos_coefficient=0.1, **kwargs ): + self.num_channels = num_channels self.num_queries = num_queries self.max_position_embeddings = max_position_embeddings self.d_model = d_model diff --git a/src/transformers/models/detr/modeling_detr.py b/src/transformers/models/detr/modeling_detr.py index deaab873fc493a..9d974f1a55123b 100644 --- a/src/transformers/models/detr/modeling_detr.py +++ b/src/transformers/models/detr/modeling_detr.py @@ -326,7 +326,7 @@ class DetrTimmConvEncoder(nn.Module): """ - def __init__(self, name: str, dilation: bool, use_pretrained_backbone: bool): + def __init__(self, name: str, dilation: bool, use_pretrained_backbone: bool, num_channels: int = 3): super().__init__() kwargs = {} @@ -336,7 +336,12 @@ def __init__(self, name: str, dilation: bool, use_pretrained_backbone: bool): requires_backends(self, ["timm"]) backbone = create_model( - name, pretrained=use_pretrained_backbone, features_only=True, out_indices=(1, 2, 3, 4), **kwargs + name, + pretrained=use_pretrained_backbone, + features_only=True, + out_indices=(1, 2, 3, 4), + in_chans=num_channels, + **kwargs, ) # replace batch norm by frozen batch norm with torch.no_grad(): @@ -1179,7 +1184,9 @@ def __init__(self, config: DetrConfig): super().__init__(config) # Create backbone + positional encoding - backbone = DetrTimmConvEncoder(config.backbone, config.dilation, config.use_pretrained_backbone) + backbone = DetrTimmConvEncoder( + config.backbone, config.dilation, config.use_pretrained_backbone, config.num_channels + ) position_embeddings = build_position_encoding(config) self.backbone = DetrConvModel(backbone, position_embeddings) diff --git a/tests/models/detr/test_modeling_detr.py b/tests/models/detr/test_modeling_detr.py index 7b0b7eeb75457c..d64c6a062e7501 100644 --- a/tests/models/detr/test_modeling_detr.py +++ b/tests/models/detr/test_modeling_detr.py @@ -416,6 +416,26 @@ def test_different_timm_backbone(self): self.assertTrue(outputs) + def test_greyscale_images(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + # use greyscale pixel values + inputs_dict["pixel_values"] = floats_tensor( + [self.model_tester.batch_size, 1, self.model_tester.min_size, self.model_tester.max_size] + ) + + # let's set num_channels to 1 + config.num_channels = 1 + + for model_class in self.all_model_classes: + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + self.assertTrue(outputs) + def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() From 74690b62a11bd0b48f86b10acda87f8f68a19251 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Wed, 31 Aug 2022 19:04:04 +0200 Subject: [PATCH 177/539] Pin ffspec (#18837) * Pin ffspec * Typo --- setup.py | 2 ++ src/transformers/dependency_versions_table.py | 1 + 2 files changed, 3 insertions(+) diff --git a/setup.py b/setup.py index e6f442e5ec23e3..c32f58546fd376 100644 --- a/setup.py +++ b/setup.py @@ -109,6 +109,7 @@ "fairscale>0.3", "faiss-cpu", "fastapi", + "fsspec!=2022.8.1", "filelock", "flake8>=3.8.3", "flax>=0.4.1", @@ -290,6 +291,7 @@ def run(self): "datasets", "dill", "evaluate", + "fsspec", # can be removed once the fix is in Datasets "pytest-timeout", "black", "sacrebleu", diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py index fec2fc9203240d..27740c43b1500b 100644 --- a/src/transformers/dependency_versions_table.py +++ b/src/transformers/dependency_versions_table.py @@ -15,6 +15,7 @@ "fairscale": "fairscale>0.3", "faiss-cpu": "faiss-cpu", "fastapi": "fastapi", + "fsspec": "fsspec!=2022.8.1", "filelock": "filelock", "flake8": "flake8>=3.8.3", "flax": "flax>=0.4.1", From f210e2a414711b43fa66abaa271ac82923284f85 Mon Sep 17 00:00:00 2001 From: Ekagra Ranjan Date: Wed, 31 Aug 2022 22:56:39 +0530 Subject: [PATCH 178/539] Improve GPT2 doc (#18787) * Minor typo in GPT2 doc * improve gpt2 label doc * update dim of label in GPT2ForTokenClassification * add change to tf --- src/transformers/models/gpt2/modeling_gpt2.py | 6 +++--- src/transformers/models/gpt2/modeling_tf_gpt2.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/transformers/models/gpt2/modeling_gpt2.py b/src/transformers/models/gpt2/modeling_gpt2.py index 4c6495d353d13e..2dde68aad8666b 100644 --- a/src/transformers/models/gpt2/modeling_gpt2.py +++ b/src/transformers/models/gpt2/modeling_gpt2.py @@ -1225,10 +1225,10 @@ def forward( r""" mc_token_ids (`torch.LongTensor` of shape `(batch_size, num_choices)`, *optional*, default to index of the last token of the input): Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) - - 1[`. + 1]`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set - `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size - 1]` All labels set to + `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]` mc_labels (`torch.LongTensor` of shape `(batch_size)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]` @@ -1519,7 +1519,7 @@ def forward( return_dict: Optional[bool] = None, ) -> Union[Tuple, TokenClassifierOutput]: r""" - labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). diff --git a/src/transformers/models/gpt2/modeling_tf_gpt2.py b/src/transformers/models/gpt2/modeling_tf_gpt2.py index b71c37dc48dbf9..5581b700a3eb62 100644 --- a/src/transformers/models/gpt2/modeling_tf_gpt2.py +++ b/src/transformers/models/gpt2/modeling_tf_gpt2.py @@ -983,7 +983,7 @@ def call( r""" mc_token_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, num_choices)`, *optional*, default to index of the last token of the input): Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) - - 1[`. + 1]`. Return: From 86387fe87f7f0e99d984e7426b9e466dc6bb4d4d Mon Sep 17 00:00:00 2001 From: Felix Schneider <110813191+FelixSchneiderZoom@users.noreply.github.com> Date: Wed, 31 Aug 2022 20:26:45 +0200 Subject: [PATCH 179/539] Add an option to `HfArgumentParser.parse_{dict,json_file}` to raise an Exception when there extra keys (#18692) * Update parser to track unneeded keys, off by default * Fix formatting * Fix docstrings and defaults in HfArgparser * Fix formatting --- src/transformers/hf_argparser.py | 39 ++++++++++++++++++++++++++++---- tests/utils/test_hf_argparser.py | 13 +++++++++++ 2 files changed, 48 insertions(+), 4 deletions(-) diff --git a/src/transformers/hf_argparser.py b/src/transformers/hf_argparser.py index ac3245a29c8945..140651b2e82ab6 100644 --- a/src/transformers/hf_argparser.py +++ b/src/transformers/hf_argparser.py @@ -234,29 +234,60 @@ def parse_args_into_dataclasses( return (*outputs,) - def parse_json_file(self, json_file: str) -> Tuple[DataClass, ...]: + def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]: """ Alternative helper method that does not use `argparse` at all, instead loading a json file and populating the dataclass types. + + Args: + json_file (`str` or `os.PathLike`): + File name of the json file to parse + allow_extra_keys (`bool`, *optional*, defaults to `False`): + Defaults to False. If False, will raise an exception if the json file contains keys that are not + parsed. + + Returns: + Tuple consisting of: + + - the dataclass instances in the same order as they were passed to the initializer. """ data = json.loads(Path(json_file).read_text()) + unused_keys = set(data.keys()) outputs = [] for dtype in self.dataclass_types: keys = {f.name for f in dataclasses.fields(dtype) if f.init} inputs = {k: v for k, v in data.items() if k in keys} + unused_keys.difference_update(inputs.keys()) obj = dtype(**inputs) outputs.append(obj) - return (*outputs,) + if not allow_extra_keys and unused_keys: + raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}") + return tuple(outputs) - def parse_dict(self, args: dict) -> Tuple[DataClass, ...]: + def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]: """ Alternative helper method that does not use `argparse` at all, instead uses a dict and populating the dataclass types. + + Args: + args (`dict`): + dict containing config values + allow_extra_keys (`bool`, *optional*, defaults to `False`): + Defaults to False. If False, will raise an exception if the dict contains keys that are not parsed. + + Returns: + Tuple consisting of: + + - the dataclass instances in the same order as they were passed to the initializer. """ + unused_keys = set(args.keys()) outputs = [] for dtype in self.dataclass_types: keys = {f.name for f in dataclasses.fields(dtype) if f.init} inputs = {k: v for k, v in args.items() if k in keys} + unused_keys.difference_update(inputs.keys()) obj = dtype(**inputs) outputs.append(obj) - return (*outputs,) + if not allow_extra_keys and unused_keys: + raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}") + return tuple(outputs) diff --git a/tests/utils/test_hf_argparser.py b/tests/utils/test_hf_argparser.py index 5ef63080a6e5cd..827888509bb36b 100644 --- a/tests/utils/test_hf_argparser.py +++ b/tests/utils/test_hf_argparser.py @@ -245,6 +245,19 @@ def test_parse_dict(self): args = BasicExample(**args_dict) self.assertEqual(parsed_args, args) + def test_parse_dict_extra_key(self): + parser = HfArgumentParser(BasicExample) + + args_dict = { + "foo": 12, + "bar": 3.14, + "baz": "42", + "flag": True, + "extra": 42, + } + + self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False) + def test_integration_training_args(self): parser = HfArgumentParser(TrainingArguments) self.assertIsNotNone(parser) From 89514f0541f312945854236132a5f4ea2516fa86 Mon Sep 17 00:00:00 2001 From: Ekagra Ranjan Date: Thu, 1 Sep 2022 00:00:29 +0530 Subject: [PATCH 180/539] Improve Text Generation doc (#18788) * fix args for bram search decoding in generation utils * fix missing PAD token in gpt2 * add PAD EOS change to TF * Update src/transformers/generation_tf_utils.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/generation_utils.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/generation_utils.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> --- src/transformers/generation_tf_utils.py | 2 +- src/transformers/generation_utils.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/transformers/generation_tf_utils.py b/src/transformers/generation_tf_utils.py index 6c8da54835ac92..64cecebbe84838 100644 --- a/src/transformers/generation_tf_utils.py +++ b/src/transformers/generation_tf_utils.py @@ -2113,7 +2113,7 @@ def greedy_search( >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") >>> model = TFAutoModelForCausalLM.from_pretrained("gpt2") - >>> # set pad_token_id to eos_token_id because GPT2 does not have a EOS token + >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token >>> model.config.pad_token_id = model.config.eos_token_id >>> input_prompt = "Today is a beautiful day, and" diff --git a/src/transformers/generation_utils.py b/src/transformers/generation_utils.py index b5b042e718c1c3..dcbe6e5946d24f 100644 --- a/src/transformers/generation_utils.py +++ b/src/transformers/generation_utils.py @@ -1139,7 +1139,7 @@ def generate( >>> sentence = "Paris is one of the densest populated areas in Europe." >>> input_ids = tokenizer(sentence, return_tensors="pt").input_ids - >>> outputs = model.generate(input_ids) + >>> outputs = model.generate(input_ids, num_beams=5) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Paris ist eines der dichtesten besiedelten Gebiete Europas.'] ```""" @@ -1635,7 +1635,7 @@ def greedy_search( >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") >>> model = AutoModelForCausalLM.from_pretrained("gpt2") - >>> # set pad_token_id to eos_token_id because GPT2 does not have a EOS token + >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token >>> model.config.pad_token_id = model.config.eos_token_id >>> input_prompt = "It might be possible to" From 7e7f743481abff9bcabdf73047dffb7c1db9d18b Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Wed, 31 Aug 2022 20:58:44 +0200 Subject: [PATCH 181/539] Add SegFormer ONNX support (#18006) * Add ONNX support * Make height and width dynamic axes Co-authored-by: Niels Rogge --- docs/source/en/serialization.mdx | 1 + src/transformers/models/segformer/__init__.py | 6 +++-- .../segformer/configuration_segformer.py | 26 +++++++++++++++++++ src/transformers/onnx/features.py | 6 +++++ tests/onnx/test_onnx_v2.py | 1 + 5 files changed, 38 insertions(+), 2 deletions(-) diff --git a/docs/source/en/serialization.mdx b/docs/source/en/serialization.mdx index d6bf15df7ffcc4..31ad430e06434b 100644 --- a/docs/source/en/serialization.mdx +++ b/docs/source/en/serialization.mdx @@ -90,6 +90,7 @@ Ready-made configurations include the following architectures: - ResNet - RoBERTa - RoFormer +- SegFormer - SqueezeBERT - T5 - ViT diff --git a/src/transformers/models/segformer/__init__.py b/src/transformers/models/segformer/__init__.py index 2317237509a05d..7b8b60651da231 100644 --- a/src/transformers/models/segformer/__init__.py +++ b/src/transformers/models/segformer/__init__.py @@ -26,7 +26,9 @@ ) -_import_structure = {"configuration_segformer": ["SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "SegformerConfig"]} +_import_structure = { + "configuration_segformer": ["SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "SegformerConfig", "SegformerOnnxConfig"] +} try: if not is_vision_available(): @@ -69,7 +71,7 @@ if TYPE_CHECKING: - from .configuration_segformer import SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SegformerConfig + from .configuration_segformer import SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SegformerConfig, SegformerOnnxConfig try: if not is_vision_available(): diff --git a/src/transformers/models/segformer/configuration_segformer.py b/src/transformers/models/segformer/configuration_segformer.py index faec5d6c4c9fb8..8b98af0faebdae 100644 --- a/src/transformers/models/segformer/configuration_segformer.py +++ b/src/transformers/models/segformer/configuration_segformer.py @@ -15,8 +15,13 @@ """ SegFormer model configuration""" import warnings +from collections import OrderedDict +from typing import Mapping + +from packaging import version from ...configuration_utils import PretrainedConfig +from ...onnx import OnnxConfig from ...utils import logging @@ -148,3 +153,24 @@ def __init__( self.decoder_hidden_size = decoder_hidden_size self.reshape_last_stage = kwargs.get("reshape_last_stage", True) self.semantic_loss_ignore_index = semantic_loss_ignore_index + + +class SegformerOnnxConfig(OnnxConfig): + + torch_onnx_minimum_version = version.parse("1.11") + + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + return OrderedDict( + [ + ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), + ] + ) + + @property + def atol_for_validation(self) -> float: + return 1e-4 + + @property + def default_onnx_opset(self) -> int: + return 12 diff --git a/src/transformers/onnx/features.py b/src/transformers/onnx/features.py index b1ea30c1afc7ae..535686f179a53c 100644 --- a/src/transformers/onnx/features.py +++ b/src/transformers/onnx/features.py @@ -456,6 +456,12 @@ class FeaturesManager: "token-classification", onnx_config_cls="models.roformer.RoFormerOnnxConfig", ), + "segformer": supported_features_mapping( + "default", + "image-classification", + "semantic-segmentation", + onnx_config_cls="models.segformer.SegformerOnnxConfig", + ), "squeezebert": supported_features_mapping( "default", "masked-lm", diff --git a/tests/onnx/test_onnx_v2.py b/tests/onnx/test_onnx_v2.py index 16ee78a63c4874..f3c19ed8fa9872 100644 --- a/tests/onnx/test_onnx_v2.py +++ b/tests/onnx/test_onnx_v2.py @@ -216,6 +216,7 @@ def test_values_override(self): ("perceiver", "deepmind/vision-perceiver-conv", ("image-classification",)), ("longformer", "allenai/longformer-base-4096"), ("yolos", "hustvl/yolos-tiny"), + ("segformer", "nvidia/segformer-b0-finetuned-ade-512-512"), } PYTORCH_EXPORT_WITH_PAST_MODELS = { From 80367cd1fb6d36ca6bdd99b70586aab4ffae1ae1 Mon Sep 17 00:00:00 2001 From: lewtun Date: Wed, 31 Aug 2022 21:48:40 +0200 Subject: [PATCH 182/539] Add security warning about the from_pretrained() method (#18801) * Add security warning about from_pretrained() method * Add sentence about malware scanner Co-authored-by: Julien Chaumond --- docs/source/en/autoclass_tutorial.mdx | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/docs/source/en/autoclass_tutorial.mdx b/docs/source/en/autoclass_tutorial.mdx index 51270302f23329..246b4b9b2e2ad8 100644 --- a/docs/source/en/autoclass_tutorial.mdx +++ b/docs/source/en/autoclass_tutorial.mdx @@ -12,7 +12,7 @@ specific language governing permissions and limitations under the License. # Load pretrained instances with an AutoClass -With so many different Transformer architectures, it can be challenging to create one for your checkpoint. As a part of 🤗 Transformers core philosophy to make the library easy, simple and flexible to use, an `AutoClass` automatically infer and load the correct architecture from a given checkpoint. The `from_pretrained` method lets you quickly load a pretrained model for any architecture so you don't have to devote time and resources to train a model from scratch. Producing this type of checkpoint-agnostic code means if your code works for one checkpoint, it will work with another checkpoint - as long as it was trained for a similar task - even if the architecture is different. +With so many different Transformer architectures, it can be challenging to create one for your checkpoint. As a part of 🤗 Transformers core philosophy to make the library easy, simple and flexible to use, an `AutoClass` automatically infer and load the correct architecture from a given checkpoint. The `from_pretrained()` method lets you quickly load a pretrained model for any architecture so you don't have to devote time and resources to train a model from scratch. Producing this type of checkpoint-agnostic code means if your code works for one checkpoint, it will work with another checkpoint - as long as it was trained for a similar task - even if the architecture is different. @@ -95,6 +95,12 @@ Easily reuse the same checkpoint to load an architecture for a different task: >>> model = AutoModelForTokenClassification.from_pretrained("distilbert-base-uncased") ``` + + +For PyTorch models, the `from_pretrained()` method uses `torch.load()` which internally uses `pickle` and is known to be insecure. In general, never load a model that could have come from an untrusted source, or that could have been tampered with. This security risk is partially mitigated for public models hosted on the Hugging Face Hub, which are [scanned for malware](https://huggingface.co/docs/hub/security-malware) at each commit. See the [Hub documentation](https://huggingface.co/docs/hub/security) for best practices like [signed commit verification](https://huggingface.co/docs/hub/security-gpg#signing-commits-with-gpg) with GPG. + + + Generally, we recommend using the `AutoTokenizer` class and the `AutoModelFor` class to load pretrained instances of models. This will ensure you load the correct architecture every time. In the next [tutorial](preprocessing), learn how to use your newly loaded tokenizer, feature extractor and processor to preprocess a dataset for fine-tuning. From 5d81a5683394dbcb151ba4842bd11f84c53edd6d Mon Sep 17 00:00:00 2001 From: Alara Dirik <8944735+alaradirik@users.noreply.github.com> Date: Thu, 1 Sep 2022 10:31:08 +0300 Subject: [PATCH 183/539] Owlvit memory leak fix (#18734) * fix memory leak * fix typos * use singular last hidden state variable names * eliminate double call to self.owlvit to return last hidden states * eliminate 2nd call to self.vision_model in OwlViTModel --- .../models/owlvit/modeling_owlvit.py | 97 +++++++++++-------- 1 file changed, 54 insertions(+), 43 deletions(-) diff --git a/src/transformers/models/owlvit/modeling_owlvit.py b/src/transformers/models/owlvit/modeling_owlvit.py index 5ff22c901a351c..badd3db41e70d4 100644 --- a/src/transformers/models/owlvit/modeling_owlvit.py +++ b/src/transformers/models/owlvit/modeling_owlvit.py @@ -140,9 +140,9 @@ class OwlViTObjectDetectionOutput(ModelOutput): class_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`): Class embeddings of all image patches. OWL-ViT represents images as a set of image patches where the total number of patches is (image_size / patch_size)**2. - text_model_last_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`)): + text_model_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`)): Last hidden states extracted from the [`OwlViTTextModel`]. - vision_model_last_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_patches + 1, hidden_size)`)): + vision_model_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_patches + 1, hidden_size)`)): Last hidden states extracted from the [`OwlViTVisionModel`]. OWL-ViT represents images as a set of image patches where the total number of patches is (image_size / patch_size)**2. """ @@ -154,8 +154,8 @@ class OwlViTObjectDetectionOutput(ModelOutput): text_embeds: torch.FloatTensor = None image_embeds: torch.FloatTensor = None class_embeds: torch.FloatTensor = None - text_model_last_hidden_states: Optional[torch.FloatTensor] = None - vision_model_last_hidden_states: Optional[torch.FloatTensor] = None + text_model_last_hidden_state: Optional[torch.FloatTensor] = None + vision_model_last_hidden_state: Optional[torch.FloatTensor] = None class OwlViTVisionEmbeddings(nn.Module): @@ -516,6 +516,9 @@ def _set_gradient_checkpointing(self, module, value=False): output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. + return_base_image_embeds (`bool`, *optional*): + Whether or not to return unprojected image embeddings. Set to `True` when `OwlViTModel` is called within + `OwlViTForObjectDetection`. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @@ -1013,6 +1016,7 @@ def forward( return_loss: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, + return_base_image_embeds: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, OwlViTOutput]: r""" @@ -1040,6 +1044,9 @@ def forward( ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict + # Whether to return unprojected image features + return_base_image_embeds = return_base_image_embeds if return_base_image_embeds is not None else False + vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, @@ -1075,6 +1082,10 @@ def forward( if return_loss: loss = owlvit_loss(logits_per_text) + if return_base_image_embeds: + last_hidden_state = vision_outputs[0] + image_embeds = self.vision_model.post_layernorm(last_hidden_state) + if not return_dict: output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return ((loss,) + output) if loss is not None else output @@ -1170,15 +1181,15 @@ def __init__(self, config: OwlViTConfig): def normalize_grid_corner_coordinates(self, feature_map: torch.FloatTensor): # Computes normalized xy corner coordinates from feature_map. if not feature_map.ndim == 4: - raise ValueError("Expected input shape is [batch_size, num_channels, height, width]") + raise ValueError("Expected input shape is [batch_size, num_patches, num_patches, hidden_dim]") device = feature_map.device - height, width = feature_map.shape[1:3] + num_patches = feature_map.shape[1] - box_coordinates = np.stack(np.meshgrid(np.arange(1, width + 1), np.arange(1, height + 1)), axis=-1).astype( - np.float32 - ) - box_coordinates /= np.array([width, height], np.float32) + box_coordinates = np.stack( + np.meshgrid(np.arange(1, num_patches + 1), np.arange(1, num_patches + 1)), axis=-1 + ).astype(np.float32) + box_coordinates /= np.array([num_patches, num_patches], np.float32) # Flatten (h, w, 2) -> (h*w, 2) box_coordinates = box_coordinates.reshape( @@ -1232,7 +1243,7 @@ def class_predictor( image_feats: torch.FloatTensor, query_embeds: torch.FloatTensor, query_mask: torch.Tensor, - ) -> Tuple[torch.FloatTensor, torch.FloatTensor]: + ) -> Tuple[torch.FloatTensor]: """ Args: image_feats: @@ -1252,18 +1263,21 @@ def image_text_embedder( pixel_values: torch.FloatTensor, attention_mask: torch.Tensor, output_attentions: Optional[bool] = None, - ) -> torch.FloatTensor: - # Encode text - text_embeds = self.owlvit.get_text_features( - input_ids=input_ids, attention_mask=attention_mask, output_attentions=output_attentions - ) + output_hidden_states: Optional[bool] = None, + ) -> Tuple[torch.FloatTensor]: - # Encode image - image_embeds = self.owlvit.get_image_features( - pixel_values, return_projected=False, output_attentions=output_attentions + # Encode text and image + outputs = self.owlvit( + pixel_values=pixel_values, + input_ids=input_ids, + attention_mask=attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_base_image_embeds=True, ) # Resize class token + image_embeds = outputs.image_embeds new_size = tuple(np.array(image_embeds.shape) - np.array((0, 1, 0))) class_token_out = torch.broadcast_to(image_embeds[:, :1, :], new_size) @@ -1279,8 +1293,13 @@ def image_text_embedder( image_embeds.shape[-1], ) image_embeds = image_embeds.reshape(new_size) + text_embeds = outputs.text_embeds + + # Last hidden states from text and vision transformers + text_model_last_hidden_state = outputs.text_model_output.last_hidden_state + vision_model_last_hidden_state = outputs.vision_model_output.last_hidden_state - return (image_embeds, text_embeds) + return (text_embeds, image_embeds, text_model_last_hidden_state, vision_model_last_hidden_state) @add_start_docstrings_to_model_forward(OWLVIT_OBJECT_DETECTION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=OwlViTObjectDetectionOutput, config_class=OwlViTConfig) @@ -1334,32 +1353,24 @@ def forward( ) return_dict = return_dict if return_dict is not None else self.config.return_dict - # Return last hidden states of text and vision transformers - text_model_last_hidden_states = None - vision_model_last_hidden_states = None - - if output_hidden_states: - outputs = self.owlvit( - input_ids=input_ids, - pixel_values=pixel_values, - attention_mask=attention_mask, - output_attentions=output_attentions, - output_hidden_states=output_hidden_states, - ) - - text_model_last_hidden_states = outputs[-2][0] - vision_model_last_hidden_states = outputs[-1][0] - # Embed images and text queries - feature_map, query_embeds = self.image_text_embedder( + outputs = self.image_text_embedder( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=output_attentions, + output_hidden_states=output_hidden_states, ) - batch_size, height, width, hidden_dim = feature_map.shape - image_feats = torch.reshape(feature_map, (batch_size, height * width, hidden_dim)) + # Last hidden states of text and vision transformers + text_model_last_hidden_state = outputs[2] + vision_model_last_hidden_state = outputs[3] + + query_embeds = outputs[0] + feature_map = outputs[1] + + batch_size, num_patches, num_patches, hidden_dim = feature_map.shape + image_feats = torch.reshape(feature_map, (batch_size, num_patches * num_patches, hidden_dim)) # Reshape from [batch_size * max_text_queries, hidden_dim] -> [batch_size, max_text_queries, hidden_dim] max_text_queries = input_ids.shape[0] // batch_size @@ -1382,8 +1393,8 @@ def forward( query_embeds, feature_map, class_embeds, - text_model_last_hidden_states, - vision_model_last_hidden_states, + text_model_last_hidden_state, + vision_model_last_hidden_state, ) output = tuple(x for x in output if x is not None) return output @@ -1394,6 +1405,6 @@ def forward( pred_boxes=pred_boxes, logits=pred_logits, class_embeds=class_embeds, - text_model_last_hidden_states=text_model_last_hidden_states, - vision_model_last_hidden_states=vision_model_last_hidden_states, + text_model_last_hidden_state=text_model_last_hidden_state, + vision_model_last_hidden_state=vision_model_last_hidden_state, ) From 359f7b4b8d3895a9ce6f0a98171676e8886b5804 Mon Sep 17 00:00:00 2001 From: flozi00 Date: Thu, 1 Sep 2022 09:57:59 +0200 Subject: [PATCH 184/539] Create pipeline_tutorial.mdx german docs (#18625) * Create pipeline_tutorial.mdx * Update _toctree.yml --- docs/source/de/_toctree.yml | 4 + docs/source/de/pipeline_tutorial.mdx | 171 +++++++++++++++++++++++++++ 2 files changed, 175 insertions(+) create mode 100644 docs/source/de/pipeline_tutorial.mdx diff --git a/docs/source/de/_toctree.yml b/docs/source/de/_toctree.yml index 6097df8d06ae0b..69f5df0a82e5b4 100644 --- a/docs/source/de/_toctree.yml +++ b/docs/source/de/_toctree.yml @@ -6,3 +6,7 @@ - local: installation title: Installation title: Erste Schritte +- sections: + - local: pipeline_tutorial + title: Pipelines für Inferenzen + title: Tutorials diff --git a/docs/source/de/pipeline_tutorial.mdx b/docs/source/de/pipeline_tutorial.mdx new file mode 100644 index 00000000000000..88c4c5195cb28d --- /dev/null +++ b/docs/source/de/pipeline_tutorial.mdx @@ -0,0 +1,171 @@ + + +# Pipelines für Inferenzen + +Die [`pipeline`] macht es einfach, jedes beliebige Modell aus dem [Hub](https://huggingface.co/models) für die Inferenz auf jede Sprache, Computer Vision, Sprache und multimodale Aufgaben zu verwenden. Selbst wenn Sie keine Erfahrung mit einer bestimmten Modalität haben oder nicht mit dem zugrundeliegenden Code hinter den Modellen vertraut sind, können Sie sie mit der [`pipeline`] für Inferenzen verwenden! In diesem Beispiel lernen Sie, wie: + +* Eine [`pipeline`] für Inferenz zu verwenden. +* Einen bestimmten Tokenizer oder ein bestimmtes Modell zu verwenden. +* Eine [`pipeline`] für Audio-, Vision- und multimodale Aufgaben zu verwenden. + + + +Eine vollständige Liste der unterstützten Aufgaben und verfügbaren Parameter finden Sie in der [`pipeline`]-Dokumentation. + + + +## Verwendung von Pipelines + +Obwohl jede Aufgabe eine zugehörige [`pipeline`] hat, ist es einfacher, die allgemeine [`pipeline`]-Abstraktion zu verwenden, die alle aufgabenspezifischen Pipelines enthält. Die [`pipeline`] lädt automatisch ein Standardmodell und eine Vorverarbeitungsklasse, die für Ihre Aufgabe inferenzfähig ist. + +1. Beginnen Sie mit der Erstellung einer [`pipeline`] und geben Sie eine Inferenzaufgabe an: + +```py +>>> from transformers import pipeline + +>>> generator = pipeline(task="text-generation") +``` + +2. Übergeben Sie Ihren Eingabetext an die [`pipeline`]: + +```py +>>> generator( +... "Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone" +... ) # doctest: +SKIP +[{'generated_text': 'Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone, Seven for the Iron-priests at the door to the east, and thirteen for the Lord Kings at the end of the mountain'}] +``` + +Wenn Sie mehr als eine Eingabe haben, übergeben Sie die Eingabe als Liste: + +```py +>>> generator( +... [ +... "Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone", +... "Nine for Mortal Men, doomed to die, One for the Dark Lord on his dark throne", +... ] +... ) # doctest: +SKIP +``` + +Alle zusätzlichen Parameter für Ihre Aufgabe können auch in die [`pipeline`] aufgenommen werden. Die Aufgabe `Text-Generierung` hat eine [`~generation_utils.GenerationMixin.generate`]-Methode mit mehreren Parametern zur Steuerung der Ausgabe. Wenn Sie zum Beispiel mehr als eine Ausgabe erzeugen wollen, setzen Sie den Parameter `num_return_sequences`: + +```py +>>> generator( +... "Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone", +... num_return_sequences=2, +... ) # doctest: +SKIP +``` + +### Wählen Sie ein Modell und einen Tokenizer + +Die [`pipeline`] akzeptiert jedes Modell aus dem [Hub] (https://huggingface.co/models). Auf dem Hub gibt es Tags, mit denen Sie nach einem Modell filtern können, das Sie für Ihre Aufgabe verwenden möchten. Sobald Sie ein passendes Modell ausgewählt haben, laden Sie es mit der entsprechenden `AutoModelFor` und [`AutoTokenizer`] Klasse. Laden Sie zum Beispiel die Klasse [`AutoModelForCausalLM`] für eine kausale Sprachmodellierungsaufgabe: + +```py +>>> from transformers import AutoTokenizer, AutoModelForCausalLM + +>>> tokenizer = AutoTokenizer.from_pretrained("distilgpt2") +>>> model = AutoModelForCausalLM.from_pretrained("distilgpt2") +``` + +Erstellen Sie eine [`pipeline`] für Ihre Aufgabe, und geben Sie das Modell und den Tokenizer an, die Sie geladen haben: + +```py +>>> from transformers import pipeline + +>>> generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer) +``` + +Übergeben Sie Ihren Eingabetext an die [`pipeline`] , um einen Text zu erzeugen: + +```py +>>> generator( +... "Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone" +... ) # doctest: +SKIP +[{'generated_text': 'Three Rings for the Elven-kings under the sky, Seven for the Dwarf-lords in their halls of stone, Seven for the Dragon-lords (for them to rule in a world ruled by their rulers, and all who live within the realm'}] +``` + +## Audio-Pipeline + +Die [`pipeline`] unterstützt auch Audioaufgaben wie Audioklassifizierung und automatische Spracherkennung. + +Lassen Sie uns zum Beispiel die Emotion in diesem Audioclip klassifizieren: + +```py +>>> from datasets import load_dataset +>>> import torch + +>>> torch.manual_seed(42) # doctest: +IGNORE_RESULT +>>> ds = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") +>>> audio_file = ds[0]["audio"]["path"] +``` + +Finden Sie ein [Audioklassifikation](https://huggingface.co/models?pipeline_tag=audio-classification) Modell auf dem Model Hub für Emotionserkennung und laden Sie es in die [`pipeline`]: + +```py +>>> from transformers import pipeline + +>>> audio_classifier = pipeline( +... task="audio-classification", model="ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition" +... ) +``` + +Übergeben Sie die Audiodatei an die [`pipeline`]: + +```py +>>> preds = audio_classifier(audio_file) +>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] +>>> preds +[{'score': 0.1315, 'label': 'calm'}, {'score': 0.1307, 'label': 'neutral'}, {'score': 0.1274, 'label': 'sad'}, {'score': 0.1261, 'label': 'fearful'}, {'score': 0.1242, 'label': 'happy'}] +``` + +## Bildverarbeitungs-Pipeline + +Die Verwendung einer [`pipeline`] für Bildverarbeitungsaufgaben ist praktisch identisch. + +Geben Sie Ihre Aufgabe an und übergeben Sie Ihr Bild an den Klassifikator. Das Bild kann ein Link oder ein lokaler Pfad zu dem Bild sein. Zum Beispiel: Welche Katzenart ist unten abgebildet? + +![pipeline-cat-chonk](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg) + +```py +>>> from transformers import pipeline + +>>> vision_classifier = pipeline(task="image-classification") +>>> preds = vision_classifier( +... images="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" +... ) +>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] +>>> preds +[{'score': 0.4335, 'label': 'lynx, catamount'}, {'score': 0.0348, 'label': 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor'}, {'score': 0.0324, 'label': 'snow leopard, ounce, Panthera uncia'}, {'score': 0.0239, 'label': 'Egyptian cat'}, {'score': 0.0229, 'label': 'tiger cat'}] +``` + +## Multimodale Pipeline + +Die [`pipeline`] unterstützt mehr als eine Modalität. Eine Aufgabe zur Beantwortung visueller Fragen (VQA) kombiniert zum Beispiel Text und Bild. Verwenden Sie einen beliebigen Bildlink und eine Frage, die Sie zu dem Bild stellen möchten. Das Bild kann eine URL oder ein lokaler Pfad zu dem Bild sein. + +Wenn Sie zum Beispiel das gleiche Bild wie in der obigen Vision-Pipeline verwenden: + +```py +>>> image = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" +>>> question = "Where is the cat?" +``` + +Erstellen Sie eine Pipeline für "vqa" und übergeben Sie ihr das Bild und die Frage: + +```py +>>> from transformers import pipeline + +>>> vqa = pipeline(task="vqa") +>>> preds = vqa(image=image, question=question) +>>> preds = [{"score": round(pred["score"], 4), "answer": pred["answer"]} for pred in preds] +>>> preds +[{'score': 0.9112, 'answer': 'snow'}, {'score': 0.8796, 'answer': 'in snow'}, {'score': 0.6717, 'answer': 'outside'}, {'score': 0.0291, 'answer': 'on ground'}, {'score': 0.027, 'answer': 'ground'}] +``` From a26c752353a127ba8e4728413806f545718a8d78 Mon Sep 17 00:00:00 2001 From: Albert Villanova del Moral <8515462+albertvillanova@users.noreply.github.com> Date: Thu, 1 Sep 2022 10:20:15 +0200 Subject: [PATCH 185/539] Unpin fsspec (#18846) --- setup.py | 2 -- src/transformers/dependency_versions_table.py | 1 - 2 files changed, 3 deletions(-) diff --git a/setup.py b/setup.py index c32f58546fd376..e6f442e5ec23e3 100644 --- a/setup.py +++ b/setup.py @@ -109,7 +109,6 @@ "fairscale>0.3", "faiss-cpu", "fastapi", - "fsspec!=2022.8.1", "filelock", "flake8>=3.8.3", "flax>=0.4.1", @@ -291,7 +290,6 @@ def run(self): "datasets", "dill", "evaluate", - "fsspec", # can be removed once the fix is in Datasets "pytest-timeout", "black", "sacrebleu", diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py index 27740c43b1500b..fec2fc9203240d 100644 --- a/src/transformers/dependency_versions_table.py +++ b/src/transformers/dependency_versions_table.py @@ -15,7 +15,6 @@ "fairscale": "fairscale>0.3", "faiss-cpu": "faiss-cpu", "fastapi": "fastapi", - "fsspec": "fsspec!=2022.8.1", "filelock": "filelock", "flake8": "flake8>=3.8.3", "flax": "flax>=0.4.1", From 563a8d58db0acd088f62167f23671ba2f88bae9c Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Thu, 1 Sep 2022 10:55:30 +0200 Subject: [PATCH 186/539] Delete `state_dict` to release memory as early as possible (#18832) Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: ydshieh --- src/transformers/modeling_utils.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index d77258c94ea089..04196633e1407f 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -417,7 +417,7 @@ def _load_state_dict_into_model(model_to_load, state_dict, start_prefix): # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants # so we need to apply the function recursively. - def load(module: nn.Module, prefix=""): + def load(module: nn.Module, state_dict, prefix=""): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) args = (state_dict, prefix, local_metadata, True, [], [], error_msgs) if is_deepspeed_zero3_enabled(): @@ -434,9 +434,12 @@ def load(module: nn.Module, prefix=""): for name, child in module._modules.items(): if child is not None: - load(child, prefix + name + ".") + load(child, state_dict, prefix + name + ".") - load(model_to_load, prefix=start_prefix) + load(model_to_load, state_dict, prefix=start_prefix) + # Delete `state_dict` so it could be collected by GC earlier. Note that `state_dict` is a copy of the argument, so + # it's safe to delete it. + del state_dict return error_msgs From 6e016634f1f58af66d6af27e272b5d9ad406e0f4 Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Thu, 1 Sep 2022 10:53:39 +0100 Subject: [PATCH 187/539] Generate: smaller TF serving test (#18840) --- tests/test_modeling_tf_common.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/test_modeling_tf_common.py b/tests/test_modeling_tf_common.py index 3372e96454d7c2..a0413afdfa0ec9 100644 --- a/tests/test_modeling_tf_common.py +++ b/tests/test_modeling_tf_common.py @@ -75,7 +75,7 @@ TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, BertConfig, TFAutoModel, - TFAutoModelForSeq2SeqLM, + TFAutoModelForCausalLM, TFAutoModelForSequenceClassification, TFBertModel, TFSharedEmbeddings, @@ -2180,8 +2180,8 @@ def test_checkpoint_sharding_local(self): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) def test_generate_tf_function_export(self): - test_model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5") - max_length = 8 + test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2") + max_length = 2 class DummyModel(tf.Module): def __init__(self, model): @@ -2204,8 +2204,8 @@ def serving(self, input_ids, attention_mask): ) return {"sequences": outputs["sequences"]} - dummy_input_ids = [[2, 3, 4, 1, 0, 0, 0, 0], [102, 103, 104, 105, 1, 0, 0, 0]] - dummy_attention_masks = [[1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 1, 0, 0, 0]] + dummy_input_ids = [[2, 0], [102, 103]] + dummy_attention_masks = [[1, 0], [1, 1]] dummy_model = DummyModel(model=test_model) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving}) From e7da38f5dc84956c4459746d0fa2ea4aa153767c Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Thu, 1 Sep 2022 12:02:52 +0200 Subject: [PATCH 188/539] add a script to get time info. from GA workflow jobs (#18822) Co-authored-by: ydshieh --- utils/get_github_job_time.py | 68 ++++++++++++++++++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 utils/get_github_job_time.py diff --git a/utils/get_github_job_time.py b/utils/get_github_job_time.py new file mode 100644 index 00000000000000..5065c108aab1b1 --- /dev/null +++ b/utils/get_github_job_time.py @@ -0,0 +1,68 @@ +import argparse +import math + +import dateutil.parser as date_parser +import requests + + +def extract_time_from_single_job(job): + """Extract time info from a single job in a GitHub Actions workflow run""" + + job_info = {} + + start = job["started_at"] + end = job["completed_at"] + + start_datetime = date_parser.parse(start) + end_datetime = date_parser.parse(end) + + duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0) + + job_info["started_at"] = start + job_info["completed_at"] = end + job_info["duration"] = duration_in_min + + return job_info + + +def get_job_time(workflow_run_id): + """Extract time info for all jobs in a GitHub Actions workflow run""" + + url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100" + result = requests.get(url).json() + job_time = {} + + try: + job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]}) + pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100) + + for i in range(pages_to_iterate_over): + result = requests.get(url + f"&page={i + 2}").json() + job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]}) + + return job_time + except Exception as e: + print("Unknown error, could not fetch links.", e) + + return {} + + +if __name__ == "__main__": + r""" + Example: + + python get_github_job_time.py --workflow_run_id 2945609517 + """ + + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--workflow_run_id", default=None, type=str, required=True, help="A GitHub Actions workflow run id." + ) + args = parser.parse_args() + + job_time = get_job_time(args.workflow_run_id) + job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) + + for k, v in job_time.items(): + print(f'{k}: {v["duration"]}') From fafbb57df14c69d3662c920502da0d401712d157 Mon Sep 17 00:00:00 2001 From: Albert Villanova del Moral <8515462+albertvillanova@users.noreply.github.com> Date: Thu, 1 Sep 2022 12:04:49 +0200 Subject: [PATCH 189/539] Pin rouge_score (#18247) * Pin rouge_score * Pin also in dependency_versions_table * Update excluded versions * Revert "Update excluded versions" This reverts commit 0d0362df30a816108835f5c061272ee2bafec270. * Revert "Revert "Update excluded versions"" This reverts commit 66c47af8a6baff253575631b0ba392e0354b6d56. --- setup.py | 2 +- src/transformers/dependency_versions_table.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index e6f442e5ec23e3..e974ff9a2b2b9a 100644 --- a/setup.py +++ b/setup.py @@ -145,7 +145,7 @@ "regex!=2019.12.17", "requests", "rjieba", - "rouge-score", + "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1", "sacrebleu>=1.4.12,<2.0.0", "sacremoses", "sagemaker>=2.31.0", diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py index fec2fc9203240d..c8f0f18793c0a5 100644 --- a/src/transformers/dependency_versions_table.py +++ b/src/transformers/dependency_versions_table.py @@ -51,7 +51,7 @@ "regex": "regex!=2019.12.17", "requests": "requests", "rjieba": "rjieba", - "rouge-score": "rouge-score", + "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1", "sacrebleu": "sacrebleu>=1.4.12,<2.0.0", "sacremoses": "sacremoses", "sagemaker": "sagemaker>=2.31.0", From f719c0377f7f97c4bf9b6b54de209f4aad0aef4b Mon Sep 17 00:00:00 2001 From: Pedro Cuenca Date: Thu, 1 Sep 2022 12:05:40 +0200 Subject: [PATCH 190/539] Minor typo in prose of model outputs documentation. (#18848) --- docs/source/en/main_classes/output.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/en/main_classes/output.mdx b/docs/source/en/main_classes/output.mdx index c221afe90dc704..391955ce291300 100644 --- a/docs/source/en/main_classes/output.mdx +++ b/docs/source/en/main_classes/output.mdx @@ -16,7 +16,7 @@ All models have outputs that are instances of subclasses of [`~utils.ModelOutput data structures containing all the information returned by the model, but that can also be used as tuples or dictionaries. -Let's see of this looks on an example: +Let's see how this looks in an example: ```python from transformers import BertTokenizer, BertForSequenceClassification From ab663b227490ee82fe944bbf3680d26fd53b337c Mon Sep 17 00:00:00 2001 From: kumapo Date: Thu, 1 Sep 2022 22:12:38 +0900 Subject: [PATCH 191/539] reflect max_new_tokens in `Seq2SeqTrainer` (#18786) * reflect max_new_tokens in gen_kwargs to `trainer.generate()` * reflect max_new_tokens in `Seq2SeqTrainer` * remove unnecessary variable * Trigger CI * fix style --- src/transformers/trainer_seq2seq.py | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/src/transformers/trainer_seq2seq.py b/src/transformers/trainer_seq2seq.py index 02ce3d393b9e75..7689998c051b8f 100644 --- a/src/transformers/trainer_seq2seq.py +++ b/src/transformers/trainer_seq2seq.py @@ -68,9 +68,8 @@ def evaluate( """ gen_kwargs = gen_kwargs.copy() - gen_kwargs["max_length"] = ( - gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length - ) + if gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None: + gen_kwargs["max_length"] = self.args.generation_max_length gen_kwargs["num_beams"] = ( gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams ) @@ -126,9 +125,8 @@ def predict( """ gen_kwargs = gen_kwargs.copy() - gen_kwargs["max_length"] = ( - gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length - ) + if gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None: + gen_kwargs["max_length"] = self.args.generation_max_length gen_kwargs["num_beams"] = ( gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams ) @@ -174,9 +172,8 @@ def prediction_step( # XXX: adapt synced_gpus for fairscale as well gen_kwargs = self._gen_kwargs.copy() - gen_kwargs["max_length"] = ( - gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.model.config.max_length - ) + if gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None: + gen_kwargs["max_length"] = self.model.config.max_length gen_kwargs["num_beams"] = ( gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.model.config.num_beams ) @@ -203,8 +200,12 @@ def prediction_step( **gen_kwargs, ) # in case the batch is shorter than max length, the output should be padded - if generated_tokens.shape[-1] < gen_kwargs["max_length"]: + if gen_kwargs.get("max_length") is not None and generated_tokens.shape[-1] < gen_kwargs["max_length"]: generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"]) + elif gen_kwargs.get("max_new_tokens") is not None and generated_tokens.shape[-1] < ( + gen_kwargs["max_new_tokens"] + 1 + ): + generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_new_tokens"] + 1) with torch.no_grad(): with self.compute_loss_context_manager(): @@ -222,8 +223,12 @@ def prediction_step( if has_labels: labels = inputs["labels"] - if labels.shape[-1] < gen_kwargs["max_length"]: + if gen_kwargs.get("max_length") is not None and labels.shape[-1] < gen_kwargs["max_length"]: labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"]) + elif gen_kwargs.get("max_new_tokens") is not None and labels.shape[-1] < ( + gen_kwargs["max_new_tokens"] + 1 + ): + labels = self._pad_tensors_to_max_len(labels, (gen_kwargs["max_new_tokens"] + 1)) else: labels = None From fe58929ad612fe0027cf745b4e198cdf65d0dbe9 Mon Sep 17 00:00:00 2001 From: Gustavo de Rosa Date: Thu, 1 Sep 2022 11:33:53 -0300 Subject: [PATCH 192/539] Adds timeout argument to training_args to avoid socket timeouts in DDP (#18562) * chore(training_args): Adds support for timeout argument. * fix(training_args): Passes make style through changes. * fix(training_args): Removes wrong docstring sentence. * fix(training_args): Fixes timeout not being JSON serializable. * fix(training_args_sm): Also updates timeout to timeout_delta. * fix(training_args): Fixes PR according to suggestions. --- .../sagemaker/training_args_sm.py | 4 +-- src/transformers/training_args.py | 27 ++++++++++++++++--- 2 files changed, 26 insertions(+), 5 deletions(-) diff --git a/src/transformers/sagemaker/training_args_sm.py b/src/transformers/sagemaker/training_args_sm.py index 6be0deb1f479d0..e4a356a25b180f 100644 --- a/src/transformers/sagemaker/training_args_sm.py +++ b/src/transformers/sagemaker/training_args_sm.py @@ -92,7 +92,7 @@ def _setup_devices(self) -> "torch.device": elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 - torch.distributed.init_process_group(backend="smddp") + torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta) self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK")) device = torch.device("cuda", self.local_rank) self._n_gpu = 1 @@ -111,7 +111,7 @@ def _setup_devices(self) -> "torch.device": # Here, we'll use torch.distributed. # Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): - torch.distributed.init_process_group(backend="nccl") + torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta) device = torch.device("cuda", self.local_rank) self._n_gpu = 1 diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index 8e6d91084c208a..646e9343571b99 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -18,6 +18,7 @@ import os import warnings from dataclasses import asdict, dataclass, field +from datetime import timedelta from enum import Enum from pathlib import Path from typing import Any, Dict, List, Optional, Union @@ -481,6 +482,11 @@ class TrainingArguments: are also available. See the [Ray documentation]( https://docs.ray.io/en/latest/tune/api_docs/analysis.html#ray.tune.ExperimentAnalysis.get_best_trial) for more options. + ddp_timeout (`int`, *optional*, defaults to 1800): + The timeout for `torch.distributed.init_process_group` calls, used to avoid GPU socket timeouts when + performing slow operations in distributed runnings. Please refer the [PyTorch documentation] + (https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group) for more + information. use_mps_device (`bool`, *optional*, defaults to `False`): Whether to use Apple Silicon chip based `mps` device. """ @@ -971,6 +977,12 @@ class TrainingArguments: ) }, ) + ddp_timeout: Optional[int] = field( + default=1800, + metadata={ + "help": "Overrides the default timeout for distributed training (value should be given in seconds)." + }, + ) def __post_init__(self): # Handle --use_env option in torch.distributed.launch (local_rank not passed as an arg then). @@ -1291,6 +1303,13 @@ def eval_batch_size(self) -> int: eval_batch_size = per_device_batch_size * max(1, self.n_gpu) return eval_batch_size + @property + def ddp_timeout_delta(self) -> timedelta: + """ + The actual timeout for torch.distributed.init_process_group since it expects a timedelta variable. + """ + return timedelta(seconds=self.ddp_timeout) + @cached_property @torch_required def _setup_devices(self) -> "torch.device": @@ -1358,7 +1377,9 @@ def _setup_devices(self) -> "torch.device": f"num_cpu_threads_per_process unset, we set it at {num_cpu_threads_per_process} to improve oob" " performance." ) - torch.distributed.init_process_group(backend=self.xpu_backend, rank=rank, world_size=size) + torch.distributed.init_process_group( + backend=self.xpu_backend, rank=rank, world_size=size, timeout=self.ddp_timeout_delta + ) elif is_torch_tpu_available(): device = xm.xla_device() self._n_gpu = 0 @@ -1369,7 +1390,7 @@ def _setup_devices(self) -> "torch.device": elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 - dist.init_process_group(backend="smddp") + dist.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta) self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK")) device = torch.device("cuda", self.local_rank) self._n_gpu = 1 @@ -1431,7 +1452,7 @@ def _setup_devices(self) -> "torch.device": # Here, we'll use torch.distributed. # Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): - torch.distributed.init_process_group(backend="nccl") + torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta) device = torch.device("cuda", self.local_rank) self._n_gpu = 1 From 954e18ab9713da83e1484f78a6f6e178b0d9fe2a Mon Sep 17 00:00:00 2001 From: Sayak Paul Date: Thu, 1 Sep 2022 20:05:15 +0530 Subject: [PATCH 193/539] TensorFlow MobileViT (#18555) * initial implementation. * add: working model till image classification. * add: initial implementation that passes intg tests. Co-authored-by: Amy * chore: formatting. * add: tests (still breaking because of config mismatch). Coo-authored-by: Yih <2521628+ydshieh@users.noreply.github.com> * add: corrected tests and remaning changes. * fix code style and repo consistency. * address PR comments. * address Amy's comments. * chore: remove from_pt argument. * chore: add full-stop. * fix: TFLite model conversion in the doc. * Update src/transformers/models/mobilevit/modeling_tf_mobilevit.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/mobilevit/modeling_tf_mobilevit.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/mobilevit/modeling_tf_mobilevit.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/mobilevit/modeling_tf_mobilevit.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/mobilevit/modeling_tf_mobilevit.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * apply formatting. * chore: remove comments from the example block. * remove identation in the example. Co-authored-by: Amy Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> --- docs/source/en/index.mdx | 2 +- docs/source/en/model_doc/mobilevit.mdx | 47 +- src/transformers/__init__.py | 14 + src/transformers/modeling_tf_outputs.py | 31 + .../models/auto/modeling_tf_auto.py | 3 + src/transformers/models/mobilevit/__init__.py | 43 +- .../models/mobilevit/modeling_tf_mobilevit.py | 1152 +++++++++++++++++ src/transformers/utils/dummy_tf_objects.py | 31 + .../mobilevit/test_modeling_tf_mobilevit.py | 420 ++++++ utils/documentation_tests.txt | 1 + 10 files changed, 1740 insertions(+), 4 deletions(-) create mode 100644 src/transformers/models/mobilevit/modeling_tf_mobilevit.py create mode 100644 tests/models/mobilevit/test_modeling_tf_mobilevit.py diff --git a/docs/source/en/index.mdx b/docs/source/en/index.mdx index ed04cad3dd9bf0..82053b11effdda 100644 --- a/docs/source/en/index.mdx +++ b/docs/source/en/index.mdx @@ -259,7 +259,7 @@ Flax), PyTorch, and/or TensorFlow. | mBART | ✅ | ✅ | ✅ | ✅ | ✅ | | Megatron-BERT | ❌ | ❌ | ✅ | ❌ | ❌ | | MobileBERT | ✅ | ✅ | ✅ | ✅ | ❌ | -| MobileViT | ❌ | ❌ | ✅ | ❌ | ❌ | +| MobileViT | ❌ | ❌ | ✅ | ✅ | ❌ | | MPNet | ✅ | ✅ | ✅ | ✅ | ❌ | | MT5 | ✅ | ✅ | ✅ | ✅ | ✅ | | MVP | ✅ | ✅ | ✅ | ❌ | ❌ | diff --git a/docs/source/en/model_doc/mobilevit.mdx b/docs/source/en/model_doc/mobilevit.mdx index f5fd403fd59eda..5725bd5ce5835f 100644 --- a/docs/source/en/model_doc/mobilevit.mdx +++ b/docs/source/en/model_doc/mobilevit.mdx @@ -22,12 +22,40 @@ The abstract from the paper is the following: Tips: -- MobileViT is more like a CNN than a Transformer model. It does not work on sequence data but on batches of images. Unlike ViT, there are no embeddings. The backbone model outputs a feature map. +- MobileViT is more like a CNN than a Transformer model. It does not work on sequence data but on batches of images. Unlike ViT, there are no embeddings. The backbone model outputs a feature map. You can follow [this tutorial](https://keras.io/examples/vision/mobilevit) for a lightweight introduction. - One can use [`MobileViTFeatureExtractor`] to prepare images for the model. Note that if you do your own preprocessing, the pretrained checkpoints expect images to be in BGR pixel order (not RGB). - The available image classification checkpoints are pre-trained on [ImageNet-1k](https://huggingface.co/datasets/imagenet-1k) (also referred to as ILSVRC 2012, a collection of 1.3 million images and 1,000 classes). - The segmentation model uses a [DeepLabV3](https://arxiv.org/abs/1706.05587) head. The available semantic segmentation checkpoints are pre-trained on [PASCAL VOC](http://host.robots.ox.ac.uk/pascal/VOC/). +- As the name suggests MobileViT was desgined to be performant and efficient on mobile phones. The TensorFlow versions of the MobileViT models are fully compatible with [TensorFlow Lite](https://www.tensorflow.org/lite). -This model was contributed by [matthijs](https://huggingface.co/Matthijs). The original code and weights can be found [here](https://github.com/apple/ml-cvnets). + You can use the following code to convert a MobileViT checkpoint (be it image classification or semantic segmentation) to generate a + TensorFlow Lite model: + +```py +from transformers import TFMobileViTForImageClassification +import tensorflow as tf + + +model_ckpt = "apple/mobilevit-xx-small" +model = TFMobileViTForImageClassification.from_pretrained(model_ckpt) + +converter = tf.lite.TFLiteConverter.from_keras_model(model) +converter.optimizations = [tf.lite.Optimize.DEFAULT] +converter.target_spec.supported_ops = [ + tf.lite.OpsSet.TFLITE_BUILTINS, + tf.lite.OpsSet.SELECT_TF_OPS, +] +tflite_model = converter.convert() +tflite_filename = model_ckpt.split("/")[-1] + ".tflite" +with open(tflite_filename, "wb") as f: + f.write(tflite_model) +``` + + The resulting model will be just **about an MB** making it a good fit for mobile applications where resources and network + bandwidth can be constrained. + + +This model was contributed by [matthijs](https://huggingface.co/Matthijs). The TensorFlow version of the model was contributed by [sayakpaul](https://huggingface.co/sayakpaul). The original code and weights can be found [here](https://github.com/apple/ml-cvnets). ## MobileViTConfig @@ -53,3 +81,18 @@ This model was contributed by [matthijs](https://huggingface.co/Matthijs). The o [[autodoc]] MobileViTForSemanticSegmentation - forward + +## TFMobileViTModel + +[[autodoc]] TFMobileViTModel + - call + +## TFMobileViTForImageClassification + +[[autodoc]] TFMobileViTForImageClassification + - call + +## TFMobileViTForSemanticSegmentation + +[[autodoc]] TFMobileViTForSemanticSegmentation + - call diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index aff905b97ec5d7..8be2f73b9e7a55 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -2398,6 +2398,15 @@ "TFMobileBertPreTrainedModel", ] ) + _import_structure["models.mobilevit"].extend( + [ + "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFMobileViTPreTrainedModel", + "TFMobileViTModel", + "TFMobileViTForImageClassification", + "TFMobileViTForSemanticSegmentation", + ] + ) _import_structure["models.mpnet"].extend( [ "TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST", @@ -4847,6 +4856,7 @@ from .models.mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel from .models.mobilebert import ( TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, @@ -4857,6 +4867,10 @@ TFMobileBertMainLayer, TFMobileBertModel, TFMobileBertPreTrainedModel, + TFMobileViTForImageClassification, + TFMobileViTForSemanticSegmentation, + TFMobileViTModel, + TFMobileViTPreTrainedModel, ) from .models.mpnet import ( TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST, diff --git a/src/transformers/modeling_tf_outputs.py b/src/transformers/modeling_tf_outputs.py index a1d3df074fe780..efb2412084a756 100644 --- a/src/transformers/modeling_tf_outputs.py +++ b/src/transformers/modeling_tf_outputs.py @@ -685,6 +685,37 @@ class TFSemanticSegmenterOutput(ModelOutput): attentions: Optional[Tuple[tf.Tensor]] = None +@dataclass +class TFSemanticSegmenterOutputWithNoAttention(ModelOutput): + """ + Base class for outputs of semantic segmentation models that do not output attention scores. + + Args: + loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Classification (or regression if config.num_labels==1) loss. + logits (`tf.Tensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`): + Classification scores for each pixel. + + + + The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is + to avoid doing two interpolations and lose some quality when a user needs to resize the logits to the + original image size as post-processing. You should always check your logits shape and resize as needed. + + + + hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `tf.Tensor` (one for the output of the embeddings, if the model has an embedding layer, + one for + the output of each layer) of shape `(batch_size, patch_size, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + """ + + loss: Optional[tf.Tensor] = None + logits: tf.Tensor = None + hidden_states: Optional[Tuple[tf.Tensor]] = None + + @dataclass class TFImageClassifierOutput(ModelOutput): """ diff --git a/src/transformers/models/auto/modeling_tf_auto.py b/src/transformers/models/auto/modeling_tf_auto.py index 991bb79a6b3719..a12f6accdcaeee 100644 --- a/src/transformers/models/auto/modeling_tf_auto.py +++ b/src/transformers/models/auto/modeling_tf_auto.py @@ -59,6 +59,7 @@ ("marian", "TFMarianModel"), ("mbart", "TFMBartModel"), ("mobilebert", "TFMobileBertModel"), + ("mobilevit", "TFMobileViTModel"), ("mpnet", "TFMPNetModel"), ("mt5", "TFMT5Model"), ("openai-gpt", "TFOpenAIGPTModel"), @@ -182,6 +183,7 @@ ("convnext", "TFConvNextForImageClassification"), ("data2vec-vision", "TFData2VecVisionForImageClassification"), ("deit", ("TFDeiTForImageClassification", "TFDeiTForImageClassificationWithTeacher")), + ("mobilevit", "TFMobileViTForImageClassification"), ("regnet", "TFRegNetForImageClassification"), ("resnet", "TFResNetForImageClassification"), ("segformer", "TFSegformerForImageClassification"), @@ -194,6 +196,7 @@ [ # Model for Semantic Segmentation mapping ("data2vec-vision", "TFData2VecVisionForSemanticSegmentation"), + ("mobilevit", "TFMobileViTForSemanticSegmentation"), ("segformer", "TFSegformerForSemanticSegmentation"), ] ) diff --git a/src/transformers/models/mobilevit/__init__.py b/src/transformers/models/mobilevit/__init__.py index cd639f50323c4f..e1e088f693ba24 100644 --- a/src/transformers/models/mobilevit/__init__.py +++ b/src/transformers/models/mobilevit/__init__.py @@ -17,7 +17,13 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_tf_available, + is_torch_available, + is_vision_available, +) _import_structure = { @@ -46,6 +52,19 @@ "MobileViTPreTrainedModel", ] +try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_tf_mobilevit"] = [ + "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFMobileViTForImageClassification", + "TFMobileViTForSemanticSegmentation", + "TFMobileViTModel", + "TFMobileViTPreTrainedModel", + ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig @@ -72,6 +91,28 @@ MobileViTPreTrainedModel, ) + try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .feature_extraction_mobilevit import MobileViTFeatureExtractor + + try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_tf_mobilevit import ( + TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFMobileViTForImageClassification, + TFMobileViTForSemanticSegmentation, + TFMobileViTModel, + TFMobileViTPreTrainedModel, + ) + else: import sys diff --git a/src/transformers/models/mobilevit/modeling_tf_mobilevit.py b/src/transformers/models/mobilevit/modeling_tf_mobilevit.py new file mode 100644 index 00000000000000..c54bd6554dc35d --- /dev/null +++ b/src/transformers/models/mobilevit/modeling_tf_mobilevit.py @@ -0,0 +1,1152 @@ +# coding=utf-8 +# Copyright 2022 Apple Inc. and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Original license: https://github.com/apple/ml-cvnets/blob/main/LICENSE +""" TensorFlow 2.0 MobileViT model.""" + +from typing import Dict, Optional, Tuple, Union + +import tensorflow as tf + +from ...activations_tf import get_tf_activation +from ...file_utils import ( + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + replace_return_docstrings, +) +from ...modeling_tf_outputs import ( + TFBaseModelOutput, + TFBaseModelOutputWithPooling, + TFImageClassifierOutputWithNoAttention, + TFSemanticSegmenterOutputWithNoAttention, +) +from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs +from ...tf_utils import shape_list, stable_softmax +from ...utils import logging +from .configuration_mobilevit import MobileViTConfig + + +logger = logging.get_logger(__name__) + +# General docstring +_CONFIG_FOR_DOC = "MobileViTConfig" +_FEAT_EXTRACTOR_FOR_DOC = "MobileViTFeatureExtractor" + +# Base docstring +_CHECKPOINT_FOR_DOC = "apple/mobilevit-small" +_EXPECTED_OUTPUT_SHAPE = [1, 640, 8, 8] + +# Image classification docstring +_IMAGE_CLASS_CHECKPOINT = "apple/mobilevit-small" +_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" + + +TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "apple/mobilevit-small", + "apple/mobilevit-x-small", + "apple/mobilevit-xx-small", + "apple/deeplabv3-mobilevit-small", + "apple/deeplabv3-mobilevit-x-small", + "apple/deeplabv3-mobilevit-xx-small", + # See all MobileViT models at https://huggingface.co/models?filter=mobilevit +] + + +def make_divisible(value: int, divisor: int = 8, min_value: Optional[int] = None) -> int: + """ + Ensure that all layers have a channel count that is divisible by `divisor`. This function is taken from the + original TensorFlow repo. It can be seen here: + https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py + """ + if min_value is None: + min_value = divisor + new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. + if new_value < 0.9 * value: + new_value += divisor + return int(new_value) + + +class TFMobileViTConvLayer(tf.keras.layers.Layer): + def __init__( + self, + config: MobileViTConfig, + out_channels: int, + kernel_size: int, + stride: int = 1, + groups: int = 1, + bias: bool = False, + dilation: int = 1, + use_normalization: bool = True, + use_activation: Union[bool, str] = True, + **kwargs + ) -> None: + super().__init__(**kwargs) + logger.warning( + f"\n{self.__class__.__name__} has backpropagation operations that are NOT supported on CPU. If you wish " + "to train/fine-tine this model, you need a GPU or a TPU" + ) + + padding = int((kernel_size - 1) / 2) * dilation + self.padding = tf.keras.layers.ZeroPadding2D(padding) + + if out_channels % groups != 0: + raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.") + + self.convolution = tf.keras.layers.Conv2D( + filters=out_channels, + kernel_size=kernel_size, + strides=stride, + padding="VALID", + dilation_rate=dilation, + groups=groups, + use_bias=bias, + name="convolution", + ) + + if use_normalization: + self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.1, name="normalization") + else: + self.normalization = None + + if use_activation: + if isinstance(use_activation, str): + self.activation = get_tf_activation(use_activation) + elif isinstance(config.hidden_act, str): + self.activation = get_tf_activation(config.hidden_act) + else: + self.activation = config.hidden_act + else: + self.activation = None + + def call(self, features: tf.Tensor, training: bool = False) -> tf.Tensor: + padded_features = self.padding(features) + features = self.convolution(padded_features) + if self.normalization is not None: + features = self.normalization(features, training=training) + if self.activation is not None: + features = self.activation(features) + return features + + +class TFMobileViTInvertedResidual(tf.keras.layers.Layer): + """ + Inverted residual block (MobileNetv2): https://arxiv.org/abs/1801.04381 + """ + + def __init__( + self, config: MobileViTConfig, in_channels: int, out_channels: int, stride: int, dilation: int = 1, **kwargs + ) -> None: + super().__init__(**kwargs) + expanded_channels = make_divisible(int(round(in_channels * config.expand_ratio)), 8) + + if stride not in [1, 2]: + raise ValueError(f"Invalid stride {stride}.") + + self.use_residual = (stride == 1) and (in_channels == out_channels) + + self.expand_1x1 = TFMobileViTConvLayer( + config, out_channels=expanded_channels, kernel_size=1, name="expand_1x1" + ) + + self.conv_3x3 = TFMobileViTConvLayer( + config, + out_channels=expanded_channels, + kernel_size=3, + stride=stride, + groups=expanded_channels, + dilation=dilation, + name="conv_3x3", + ) + + self.reduce_1x1 = TFMobileViTConvLayer( + config, + out_channels=out_channels, + kernel_size=1, + use_activation=False, + name="reduce_1x1", + ) + + def call(self, features: tf.Tensor, training: bool = False) -> tf.Tensor: + residual = features + + features = self.expand_1x1(features, training=training) + features = self.conv_3x3(features, training=training) + features = self.reduce_1x1(features, training=training) + + return residual + features if self.use_residual else features + + +class TFMobileViTMobileNetLayer(tf.keras.layers.Layer): + def __init__( + self, + config: MobileViTConfig, + in_channels: int, + out_channels: int, + stride: int = 1, + num_stages: int = 1, + **kwargs + ) -> None: + super().__init__(**kwargs) + + self.layers = [] + for i in range(num_stages): + layer = TFMobileViTInvertedResidual( + config, + in_channels=in_channels, + out_channels=out_channels, + stride=stride if i == 0 else 1, + name=f"layer.{i}", + ) + self.layers.append(layer) + in_channels = out_channels + + def call(self, features: tf.Tensor, training: bool = False) -> tf.Tensor: + for layer_module in self.layers: + features = layer_module(features, training=training) + return features + + +class TFMobileViTSelfAttention(tf.keras.layers.Layer): + def __init__(self, config: MobileViTConfig, hidden_size: int, **kwargs) -> None: + super().__init__(**kwargs) + + if hidden_size % config.num_attention_heads != 0: + raise ValueError( + f"The hidden size {hidden_size,} is not a multiple of the number of attention " + f"heads {config.num_attention_heads}." + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + scale = tf.cast(self.attention_head_size, dtype=tf.float32) + self.scale = tf.math.sqrt(scale) + + self.query = tf.keras.layers.Dense(self.all_head_size, use_bias=config.qkv_bias, name="query") + self.key = tf.keras.layers.Dense(self.all_head_size, use_bias=config.qkv_bias, name="key") + self.value = tf.keras.layers.Dense(self.all_head_size, use_bias=config.qkv_bias, name="value") + + self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob) + + def transpose_for_scores(self, x: tf.Tensor) -> tf.Tensor: + batch_size = tf.shape(x)[0] + x = tf.reshape(x, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) + return tf.transpose(x, perm=[0, 2, 1, 3]) + + def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: + batch_size = tf.shape(hidden_states)[0] + + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + query_layer = self.transpose_for_scores(self.query(hidden_states)) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) + attention_scores = attention_scores / self.scale + + # Normalize the attention scores to probabilities. + attention_probs = stable_softmax(attention_scores, axis=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs, training=training) + + context_layer = tf.matmul(attention_probs, value_layer) + + context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) + context_layer = tf.reshape(context_layer, shape=(batch_size, -1, self.all_head_size)) + return context_layer + + +class TFMobileViTSelfOutput(tf.keras.layers.Layer): + def __init__(self, config: MobileViTConfig, hidden_size: int, **kwargs) -> None: + super().__init__(**kwargs) + self.dense = tf.keras.layers.Dense(hidden_size, name="dense") + self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) + + def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states, training=training) + return hidden_states + + +class TFMobileViTAttention(tf.keras.layers.Layer): + def __init__(self, config: MobileViTConfig, hidden_size: int, **kwargs) -> None: + super().__init__(**kwargs) + self.attention = TFMobileViTSelfAttention(config, hidden_size, name="attention") + self.dense_output = TFMobileViTSelfOutput(config, hidden_size, name="output") + + def prune_heads(self, heads): + raise NotImplementedError + + def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: + self_outputs = self.attention(hidden_states, training=training) + attention_output = self.dense_output(self_outputs, training=training) + return attention_output + + +class TFMobileViTIntermediate(tf.keras.layers.Layer): + def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int, **kwargs) -> None: + super().__init__(**kwargs) + self.dense = tf.keras.layers.Dense(intermediate_size, name="dense") + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = get_tf_activation(config.hidden_act) + else: + self.intermediate_act_fn = config.hidden_act + + def call(self, hidden_states: tf.Tensor) -> tf.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class TFMobileViTOutput(tf.keras.layers.Layer): + def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int, **kwargs) -> None: + super().__init__(**kwargs) + self.dense = tf.keras.layers.Dense(hidden_size, name="dense") + self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) + + def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states, training=training) + hidden_states = hidden_states + input_tensor + return hidden_states + + +class TFMobileViTTransformerLayer(tf.keras.layers.Layer): + def __init__(self, config: MobileViTConfig, hidden_size: int, intermediate_size: int, **kwargs) -> None: + super().__init__(**kwargs) + self.attention = TFMobileViTAttention(config, hidden_size, name="attention") + self.intermediate = TFMobileViTIntermediate(config, hidden_size, intermediate_size, name="intermediate") + self.mobilevit_output = TFMobileViTOutput(config, hidden_size, intermediate_size, name="output") + self.layernorm_before = tf.keras.layers.LayerNormalization( + epsilon=config.layer_norm_eps, name="layernorm_before" + ) + self.layernorm_after = tf.keras.layers.LayerNormalization( + epsilon=config.layer_norm_eps, name="layernorm_after" + ) + + def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: + attention_output = self.attention(self.layernorm_before(hidden_states), training=training) + hidden_states = attention_output + hidden_states + + layer_output = self.layernorm_after(hidden_states) + layer_output = self.intermediate(layer_output) + layer_output = self.mobilevit_output(layer_output, hidden_states, training=training) + return layer_output + + +class TFMobileViTTransformer(tf.keras.layers.Layer): + def __init__(self, config: MobileViTConfig, hidden_size: int, num_stages: int, **kwargs) -> None: + super().__init__(**kwargs) + + self.layers = [] + for i in range(num_stages): + transformer_layer = TFMobileViTTransformerLayer( + config, + hidden_size=hidden_size, + intermediate_size=int(hidden_size * config.mlp_ratio), + name=f"layer.{i}", + ) + self.layers.append(transformer_layer) + + def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: + for layer_module in self.layers: + hidden_states = layer_module(hidden_states, training=training) + return hidden_states + + +class TFMobileViTLayer(tf.keras.layers.Layer): + """ + MobileViT block: https://arxiv.org/abs/2110.02178 + """ + + def __init__( + self, + config: MobileViTConfig, + in_channels: int, + out_channels: int, + stride: int, + hidden_size: int, + num_stages: int, + dilation: int = 1, + **kwargs + ) -> None: + super().__init__(**kwargs) + self.patch_width = config.patch_size + self.patch_height = config.patch_size + + if stride == 2: + self.downsampling_layer = TFMobileViTInvertedResidual( + config, + in_channels=in_channels, + out_channels=out_channels, + stride=stride if dilation == 1 else 1, + dilation=dilation // 2 if dilation > 1 else 1, + name="downsampling_layer", + ) + in_channels = out_channels + else: + self.downsampling_layer = None + + self.conv_kxk = TFMobileViTConvLayer( + config, out_channels=in_channels, kernel_size=config.conv_kernel_size, name="conv_kxk" + ) + + self.conv_1x1 = TFMobileViTConvLayer( + config, + out_channels=hidden_size, + kernel_size=1, + use_normalization=False, + use_activation=False, + name="conv_1x1", + ) + + self.transformer = TFMobileViTTransformer( + config, hidden_size=hidden_size, num_stages=num_stages, name="transformer" + ) + + self.layernorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm") + + self.conv_projection = TFMobileViTConvLayer( + config, out_channels=in_channels, kernel_size=1, name="conv_projection" + ) + + self.fusion = TFMobileViTConvLayer( + config, out_channels=in_channels, kernel_size=config.conv_kernel_size, name="fusion" + ) + + def unfolding(self, features: tf.Tensor) -> Tuple[tf.Tensor, Dict]: + patch_width, patch_height = self.patch_width, self.patch_height + patch_area = tf.cast(patch_width * patch_height, "int32") + + batch_size = tf.shape(features)[0] + orig_height = tf.shape(features)[1] + orig_width = tf.shape(features)[2] + channels = tf.shape(features)[3] + + new_height = tf.cast(tf.math.ceil(orig_height / patch_height) * patch_height, "int32") + new_width = tf.cast(tf.math.ceil(orig_width / patch_width) * patch_width, "int32") + + interpolate = new_width != orig_width or new_height != orig_height + if interpolate: + # Note: Padding can be done, but then it needs to be handled in attention function. + features = tf.image.resize(features, size=(new_height, new_width), method="bilinear") + + # number of patches along width and height + num_patch_width = new_width // patch_width + num_patch_height = new_height // patch_height + num_patches = num_patch_height * num_patch_width + + # convert from shape (batch_size, orig_height, orig_width, channels) + # to the shape (batch_size * patch_area, num_patches, channels) + features = tf.transpose(features, [0, 3, 1, 2]) + patches = tf.reshape( + features, (batch_size * channels * num_patch_height, patch_height, num_patch_width, patch_width) + ) + patches = tf.transpose(patches, [0, 2, 1, 3]) + patches = tf.reshape(patches, (batch_size, channels, num_patches, patch_area)) + patches = tf.transpose(patches, [0, 3, 2, 1]) + patches = tf.reshape(patches, (batch_size * patch_area, num_patches, channels)) + + info_dict = { + "orig_size": (orig_height, orig_width), + "batch_size": batch_size, + "channels": channels, + "interpolate": interpolate, + "num_patches": num_patches, + "num_patches_width": num_patch_width, + "num_patches_height": num_patch_height, + } + return patches, info_dict + + def folding(self, patches: tf.Tensor, info_dict: Dict) -> tf.Tensor: + patch_width, patch_height = self.patch_width, self.patch_height + patch_area = int(patch_width * patch_height) + + batch_size = info_dict["batch_size"] + channels = info_dict["channels"] + num_patches = info_dict["num_patches"] + num_patch_height = info_dict["num_patches_height"] + num_patch_width = info_dict["num_patches_width"] + + # convert from shape (batch_size * patch_area, num_patches, channels) + # back to shape (batch_size, channels, orig_height, orig_width) + features = tf.reshape(patches, (batch_size, patch_area, num_patches, -1)) + features = tf.transpose(features, perm=(0, 3, 2, 1)) + features = tf.reshape( + features, (batch_size * channels * num_patch_height, num_patch_width, patch_height, patch_width) + ) + features = tf.transpose(features, perm=(0, 2, 1, 3)) + features = tf.reshape( + features, (batch_size, channels, num_patch_height * patch_height, num_patch_width * patch_width) + ) + features = tf.transpose(features, perm=(0, 2, 3, 1)) + + if info_dict["interpolate"]: + features = tf.image.resize(features, size=info_dict["orig_size"], method="bilinear") + + return features + + def call(self, features: tf.Tensor, training: bool = False) -> tf.Tensor: + # reduce spatial dimensions if needed + if self.downsampling_layer: + features = self.downsampling_layer(features, training=training) + + residual = features + + # local representation + features = self.conv_kxk(features, training=training) + features = self.conv_1x1(features, training=training) + + # convert feature map to patches + patches, info_dict = self.unfolding(features) + + # learn global representations + patches = self.transformer(patches, training=training) + patches = self.layernorm(patches) + + # convert patches back to feature maps + features = self.folding(patches, info_dict) + + features = self.conv_projection(features, training=training) + features = self.fusion(tf.concat([residual, features], axis=-1), training=training) + return features + + +class TFMobileViTEncoder(tf.keras.layers.Layer): + def __init__(self, config: MobileViTConfig, **kwargs) -> None: + super().__init__(**kwargs) + self.config = config + + self.layers = [] + + # segmentation architectures like DeepLab and PSPNet modify the strides + # of the classification backbones + dilate_layer_4 = dilate_layer_5 = False + if config.output_stride == 8: + dilate_layer_4 = True + dilate_layer_5 = True + elif config.output_stride == 16: + dilate_layer_5 = True + + dilation = 1 + + layer_1 = TFMobileViTMobileNetLayer( + config, + in_channels=config.neck_hidden_sizes[0], + out_channels=config.neck_hidden_sizes[1], + stride=1, + num_stages=1, + name="layer.0", + ) + self.layers.append(layer_1) + + layer_2 = TFMobileViTMobileNetLayer( + config, + in_channels=config.neck_hidden_sizes[1], + out_channels=config.neck_hidden_sizes[2], + stride=2, + num_stages=3, + name="layer.1", + ) + self.layers.append(layer_2) + + layer_3 = TFMobileViTLayer( + config, + in_channels=config.neck_hidden_sizes[2], + out_channels=config.neck_hidden_sizes[3], + stride=2, + hidden_size=config.hidden_sizes[0], + num_stages=2, + name="layer.2", + ) + self.layers.append(layer_3) + + if dilate_layer_4: + dilation *= 2 + + layer_4 = TFMobileViTLayer( + config, + in_channels=config.neck_hidden_sizes[3], + out_channels=config.neck_hidden_sizes[4], + stride=2, + hidden_size=config.hidden_sizes[1], + num_stages=4, + dilation=dilation, + name="layer.3", + ) + self.layers.append(layer_4) + + if dilate_layer_5: + dilation *= 2 + + layer_5 = TFMobileViTLayer( + config, + in_channels=config.neck_hidden_sizes[4], + out_channels=config.neck_hidden_sizes[5], + stride=2, + hidden_size=config.hidden_sizes[2], + num_stages=3, + dilation=dilation, + name="layer.4", + ) + self.layers.append(layer_5) + + def call( + self, + hidden_states: tf.Tensor, + output_hidden_states: bool = False, + return_dict: bool = True, + training: bool = False, + ) -> Union[tuple, TFBaseModelOutput]: + all_hidden_states = () if output_hidden_states else None + + for i, layer_module in enumerate(self.layers): + hidden_states = layer_module(hidden_states, training=training) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) + + return TFBaseModelOutput(last_hidden_state=hidden_states, hidden_states=all_hidden_states) + + +@keras_serializable +class TFMobileViTMainLayer(tf.keras.layers.Layer): + config_class = MobileViTConfig + + def __init__(self, config: MobileViTConfig, expand_output: bool = True, **kwargs): + super().__init__(**kwargs) + self.config = config + self.expand_output = expand_output + + self.conv_stem = TFMobileViTConvLayer( + config, + out_channels=config.neck_hidden_sizes[0], + kernel_size=3, + stride=2, + name="conv_stem", + ) + + self.encoder = TFMobileViTEncoder(config, name="encoder") + + if self.expand_output: + self.conv_1x1_exp = TFMobileViTConvLayer( + config, out_channels=config.neck_hidden_sizes[6], kernel_size=1, name="conv_1x1_exp" + ) + + self.pooler = tf.keras.layers.GlobalAveragePooling2D(data_format="channels_first", name="pooler") + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + raise NotImplementedError + + @unpack_inputs + def call( + self, + pixel_values: Optional[tf.Tensor] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[Tuple[tf.Tensor], TFBaseModelOutputWithPooling]: + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. + # So change the input format from `NCHW` to `NHWC`. + # shape = (batch_size, in_height, in_width, in_channels=num_channels) + pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1)) + + embedding_output = self.conv_stem(pixel_values, training=training) + + encoder_outputs = self.encoder( + embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training + ) + + if self.expand_output: + last_hidden_state = self.conv_1x1_exp(encoder_outputs[0]) + + # Change to NCHW output format to have uniformity in the modules + last_hidden_state = tf.transpose(last_hidden_state, perm=[0, 3, 1, 2]) + + # global average pooling: (batch_size, channels, height, width) -> (batch_size, channels) + pooled_output = self.pooler(last_hidden_state) + else: + last_hidden_state = encoder_outputs[0] + # Change to NCHW output format to have uniformity in the modules + last_hidden_state = tf.transpose(last_hidden_state, perm=[0, 3, 1, 2]) + pooled_output = None + + if not return_dict: + output = (last_hidden_state, pooled_output) if pooled_output is not None else (last_hidden_state,) + + # Change to NCHW output format to have uniformity in the modules + if not self.expand_output: + remaining_encoder_outputs = encoder_outputs[1:] + remaining_encoder_outputs = tuple( + [tf.transpose(h, perm=(0, 3, 1, 2)) for h in remaining_encoder_outputs[0]] + ) + remaining_encoder_outputs = (remaining_encoder_outputs,) + return output + remaining_encoder_outputs + else: + return output + encoder_outputs[1:] + + # Change the other hidden state outputs to NCHW as well + if output_hidden_states: + hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]]) + + return TFBaseModelOutputWithPooling( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, + ) + + +class TFMobileViTPreTrainedModel(TFPreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = MobileViTConfig + base_model_prefix = "mobilevit" + main_input_name = "pixel_values" + + @property + def dummy_inputs(self) -> Dict[str, tf.Tensor]: + """ + Dummy inputs to build the network. + + Returns: + `Dict[str, tf.Tensor]`: The dummy inputs. + """ + VISION_DUMMY_INPUTS = tf.random.uniform( + shape=(3, self.config.num_channels, self.config.image_size, self.config.image_size), + dtype=tf.float32, + ) + return {"pixel_values": tf.constant(VISION_DUMMY_INPUTS)} + + @tf.function( + input_signature=[ + { + "pixel_values": tf.TensorSpec((None, None, None, None), tf.float32, name="pixel_values"), + } + ] + ) + def serving(self, inputs): + """ + Method used for serving the model. + + Args: + inputs (`Dict[str, tf.Tensor]`): + The input of the saved model as a dictionary of tensors. + """ + output = self.call(inputs) + return self.serving_output(output) + + +MOBILEVIT_START_DOCSTRING = r""" + This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it + as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and + behavior. + + + + TF 2.0 models accepts two formats as inputs: + + - having all inputs as keyword arguments (like PyTorch models), or + - having all inputs as a list, tuple or dict in the first positional arguments. + + This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the + tensors in the first argument of the model call function: `model(inputs)`. + + + + Parameters: + config ([`MobileViTConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. +""" + +MOBILEVIT_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`MobileViTFeatureExtractor`]. See + [`MobileViTFeatureExtractor.__call__`] for details. + + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. This argument can be used only in eager mode, in graph mode the value in the config will be + used instead. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in + eager mode, in graph mode the value will always be set to True. +""" + + +@add_start_docstrings( + "The bare MobileViT model outputting raw hidden-states without any specific head on top.", + MOBILEVIT_START_DOCSTRING, +) +class TFMobileViTModel(TFMobileViTPreTrainedModel): + def __init__(self, config: MobileViTConfig, expand_output: bool = True, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + self.config = config + self.expand_output = expand_output + + self.mobilevit = TFMobileViTMainLayer(config, expand_output=expand_output, name="mobilevit") + + @unpack_inputs + @add_start_docstrings_to_model_forward(MOBILEVIT_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + processor_class=_FEAT_EXTRACTOR_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFBaseModelOutputWithPooling, + config_class=_CONFIG_FOR_DOC, + modality="vision", + expected_output=_EXPECTED_OUTPUT_SHAPE, + ) + def call( + self, + pixel_values: Optional[tf.Tensor] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[Tuple[tf.Tensor], TFBaseModelOutputWithPooling]: + + output = self.mobilevit(pixel_values, output_hidden_states, return_dict, training=training) + return output + + def serving_output(self, output: TFBaseModelOutputWithPooling) -> TFBaseModelOutputWithPooling: + # hidden_states not converted to Tensor with tf.convert_to_tensor as they are all of different dimensions + return TFBaseModelOutputWithPooling( + last_hidden_state=output.last_hidden_state, + pooler_output=output.pooler_output, + hidden_states=output.hidden_states, + ) + + +@add_start_docstrings( + """ + MobileViT model with an image classification head on top (a linear layer on top of the pooled features), e.g. for + ImageNet. + """, + MOBILEVIT_START_DOCSTRING, +) +class TFMobileViTForImageClassification(TFMobileViTPreTrainedModel, TFSequenceClassificationLoss): + def __init__(self, config: MobileViTConfig, *inputs, **kwargs) -> None: + super().__init__(config, *inputs, **kwargs) + + self.num_labels = config.num_labels + self.mobilevit = TFMobileViTMainLayer(config, name="mobilevit") + + # Classifier head + self.dropout = tf.keras.layers.Dropout(config.classifier_dropout_prob) + self.classifier = ( + tf.keras.layers.Dense(config.num_labels, name="classifier") if config.num_labels > 0 else tf.identity + ) + + @unpack_inputs + @add_start_docstrings_to_model_forward(MOBILEVIT_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + processor_class=_FEAT_EXTRACTOR_FOR_DOC, + checkpoint=_IMAGE_CLASS_CHECKPOINT, + output_type=TFImageClassifierOutputWithNoAttention, + config_class=_CONFIG_FOR_DOC, + expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, + ) + def call( + self, + pixel_values: Optional[tf.Tensor] = None, + output_hidden_states: Optional[bool] = None, + labels: Optional[tf.Tensor] = None, + return_dict: Optional[bool] = None, + training: Optional[bool] = False, + ) -> Union[tuple, TFImageClassifierOutputWithNoAttention]: + r""" + labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): + Labels for computing the image classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.mobilevit( + pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training + ) + + pooled_output = outputs.pooler_output if return_dict else outputs[1] + + logits = self.classifier(self.dropout(pooled_output, training=training)) + loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TFImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states) + + def serving_output(self, output: TFImageClassifierOutputWithNoAttention) -> TFImageClassifierOutputWithNoAttention: + # hidden_states and attention not converted to Tensor with tf.convert_to_tensor as they are all of different dimensions + return TFImageClassifierOutputWithNoAttention(logits=output.logits, hidden_states=output.hidden_states) + + +class TFMobileViTASPPPooling(tf.keras.layers.Layer): + def __init__(self, config: MobileViTConfig, out_channels: int, **kwargs) -> None: + super().__init__(**kwargs) + + self.global_pool = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="global_pool") + + self.conv_1x1 = TFMobileViTConvLayer( + config, + out_channels=out_channels, + kernel_size=1, + stride=1, + use_normalization=True, + use_activation="relu", + name="conv_1x1", + ) + + def call(self, features: tf.Tensor, training: bool = False) -> tf.Tensor: + spatial_size = shape_list(features)[1:-1] + features = self.global_pool(features) + features = self.conv_1x1(features, training=training) + features = tf.image.resize(features, size=spatial_size, method="bilinear") + return features + + +class TFMobileViTASPP(tf.keras.layers.Layer): + """ + ASPP module defined in DeepLab papers: https://arxiv.org/abs/1606.00915, https://arxiv.org/abs/1706.05587 + """ + + def __init__(self, config: MobileViTConfig, **kwargs) -> None: + super().__init__(**kwargs) + + out_channels = config.aspp_out_channels + + if len(config.atrous_rates) != 3: + raise ValueError("Expected 3 values for atrous_rates") + + self.convs = [] + + in_projection = TFMobileViTConvLayer( + config, + out_channels=out_channels, + kernel_size=1, + use_activation="relu", + name="convs.0", + ) + self.convs.append(in_projection) + + self.convs.extend( + [ + TFMobileViTConvLayer( + config, + out_channels=out_channels, + kernel_size=3, + dilation=rate, + use_activation="relu", + name=f"convs.{i + 1}", + ) + for i, rate in enumerate(config.atrous_rates) + ] + ) + + pool_layer = TFMobileViTASPPPooling(config, out_channels, name=f"convs.{len(config.atrous_rates) + 1}") + self.convs.append(pool_layer) + + self.project = TFMobileViTConvLayer( + config, + out_channels=out_channels, + kernel_size=1, + use_activation="relu", + name="project", + ) + + self.dropout = tf.keras.layers.Dropout(config.aspp_dropout_prob) + + def call(self, features: tf.Tensor, training: bool = False) -> tf.Tensor: + # since the hidden states were transposed to have `(batch_size, channels, height, width)` + # layout we transpose them back to have `(batch_size, height, width, channels)` layout. + features = tf.transpose(features, perm=[0, 2, 3, 1]) + pyramid = [] + for conv in self.convs: + pyramid.append(conv(features, training=training)) + pyramid = tf.concat(pyramid, axis=-1) + + pooled_features = self.project(pyramid, training=training) + pooled_features = self.dropout(pooled_features, training=training) + return pooled_features + + +class TFMobileViTDeepLabV3(tf.keras.layers.Layer): + """ + DeepLabv3 architecture: https://arxiv.org/abs/1706.05587 + """ + + def __init__(self, config: MobileViTConfig, **kwargs) -> None: + super().__init__(**kwargs) + self.aspp = TFMobileViTASPP(config, name="aspp") + + self.dropout = tf.keras.layers.Dropout(config.classifier_dropout_prob) + + self.classifier = TFMobileViTConvLayer( + config, + out_channels=config.num_labels, + kernel_size=1, + use_normalization=False, + use_activation=False, + bias=True, + name="classifier", + ) + + def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: + features = self.aspp(hidden_states[-1], training=training) + features = self.dropout(features, training=training) + features = self.classifier(features, training=training) + return features + + +@add_start_docstrings( + """ + MobileViT model with a semantic segmentation head on top, e.g. for Pascal VOC. + """, + MOBILEVIT_START_DOCSTRING, +) +class TFMobileViTForSemanticSegmentation(TFMobileViTPreTrainedModel): + def __init__(self, config: MobileViTConfig, **kwargs) -> None: + super().__init__(config, **kwargs) + + self.num_labels = config.num_labels + self.mobilevit = TFMobileViTMainLayer(config, expand_output=False, name="mobilevit") + self.segmentation_head = TFMobileViTDeepLabV3(config, name="segmentation_head") + + def hf_compute_loss(self, logits, labels): + # upsample logits to the images' original size + # `labels` is of shape (batch_size, height, width) + label_interp_shape = shape_list(labels)[1:] + + upsampled_logits = tf.image.resize(logits, size=label_interp_shape, method="bilinear") + # compute weighted loss + loss_fct = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction="none") + + def masked_loss(real, pred): + unmasked_loss = loss_fct(real, pred) + mask = tf.cast(real != self.config.semantic_loss_ignore_index, dtype=unmasked_loss.dtype) + masked_loss = unmasked_loss * mask + # Reduction strategy in the similar spirit with + # https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_tf_utils.py#L210 + reduced_masked_loss = tf.reduce_sum(masked_loss) / tf.reduce_sum(mask) + return tf.reshape(reduced_masked_loss, (1,)) + + return masked_loss(labels, upsampled_logits) + + @unpack_inputs + @add_start_docstrings_to_model_forward(MOBILEVIT_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=TFSemanticSegmenterOutputWithNoAttention, config_class=_CONFIG_FOR_DOC) + def call( + self, + pixel_values: Optional[tf.Tensor] = None, + labels: Optional[tf.Tensor] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[tuple, TFSemanticSegmenterOutputWithNoAttention]: + r""" + labels (`tf.Tensor` of shape `(batch_size, height, width)`, *optional*): + Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy). + + Returns: + + Examples: + + ```python + >>> from transformers import MobileViTFeatureExtractor, TFMobileViTForSemanticSegmentation + >>> from PIL import Image + >>> import requests + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> feature_extractor = MobileViTFeatureExtractor.from_pretrained("apple/deeplabv3-mobilevit-small") + >>> model = TFMobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-small") + + >>> inputs = feature_extractor(images=image, return_tensors="tf") + + >>> outputs = model(**inputs) + + >>> # logits are of shape (batch_size, num_labels, height, width) + >>> logits = outputs.logits + ```""" + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.mobilevit( + pixel_values, + output_hidden_states=True, # we need the intermediate hidden states + return_dict=return_dict, + training=training, + ) + + encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1] + + logits = self.segmentation_head(encoder_hidden_states, training=training) + + loss = None + if labels is not None: + if not self.config.num_labels > 1: + raise ValueError("The number of labels should be greater than one") + else: + loss = self.hf_compute_loss(logits=logits, labels=labels) + + # make logits of shape (batch_size, num_labels, height, width) to + # keep them consistent across APIs + logits = tf.transpose(logits, perm=[0, 3, 1, 2]) + + if not return_dict: + if output_hidden_states: + output = (logits,) + outputs[1:] + else: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TFSemanticSegmenterOutputWithNoAttention( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states if output_hidden_states else None, + ) + + def serving_output( + self, output: TFSemanticSegmenterOutputWithNoAttention + ) -> TFSemanticSegmenterOutputWithNoAttention: + return TFSemanticSegmenterOutputWithNoAttention(logits=output.logits, hidden_states=output.hidden_states) diff --git a/src/transformers/utils/dummy_tf_objects.py b/src/transformers/utils/dummy_tf_objects.py index e09d26aec5c96b..bc3eb64ca46dab 100644 --- a/src/transformers/utils/dummy_tf_objects.py +++ b/src/transformers/utils/dummy_tf_objects.py @@ -1524,6 +1524,9 @@ def __init__(self, *args, **kwargs): TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None +TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + class TFMobileBertForMaskedLM(metaclass=DummyObject): _backends = ["tf"] @@ -1594,6 +1597,34 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) +class TFMobileViTForImageClassification(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMobileViTForSemanticSegmentation(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMobileViTModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFMobileViTPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/tests/models/mobilevit/test_modeling_tf_mobilevit.py b/tests/models/mobilevit/test_modeling_tf_mobilevit.py new file mode 100644 index 00000000000000..d46ee895ed71f5 --- /dev/null +++ b/tests/models/mobilevit/test_modeling_tf_mobilevit.py @@ -0,0 +1,420 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Testing suite for the TensorFlow MobileViT model. """ + + +import inspect +import unittest + +from transformers import MobileViTConfig +from transformers.file_utils import is_tf_available, is_vision_available +from transformers.testing_utils import require_tf, slow + +from ...test_configuration_common import ConfigTester +from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor + + +if is_tf_available(): + import numpy as np + import tensorflow as tf + + from transformers import TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel + from transformers.models.mobilevit.modeling_tf_mobilevit import TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST + + +if is_vision_available(): + from PIL import Image + + from transformers import MobileViTFeatureExtractor + + +class TFMobileViTConfigTester(ConfigTester): + def create_and_test_config_common_properties(self): + config = self.config_class(**self.inputs_dict) + self.parent.assertTrue(hasattr(config, "hidden_sizes")) + self.parent.assertTrue(hasattr(config, "neck_hidden_sizes")) + self.parent.assertTrue(hasattr(config, "num_attention_heads")) + + +class TFMobileViTModelTester: + def __init__( + self, + parent, + batch_size=13, + image_size=32, + patch_size=2, + num_channels=3, + last_hidden_size=640, + num_attention_heads=4, + hidden_act="silu", + conv_kernel_size=3, + output_stride=32, + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + classifier_dropout_prob=0.1, + initializer_range=0.02, + is_training=True, + use_labels=True, + num_labels=10, + scope=None, + ): + self.parent = parent + self.batch_size = batch_size + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.last_hidden_size = last_hidden_size + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.conv_kernel_size = conv_kernel_size + self.output_stride = output_stride + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.classifier_dropout_prob = classifier_dropout_prob + self.use_labels = use_labels + self.is_training = is_training + self.num_labels = num_labels + self.initializer_range = initializer_range + self.scope = scope + + def prepare_config_and_inputs(self): + pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) + + labels = None + pixel_labels = None + if self.use_labels: + labels = ids_tensor([self.batch_size], self.num_labels) + pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) + + config = self.get_config() + + return config, pixel_values, labels, pixel_labels + + def get_config(self): + return MobileViTConfig( + image_size=self.image_size, + patch_size=self.patch_size, + num_channels=self.num_channels, + num_attention_heads=self.num_attention_heads, + hidden_act=self.hidden_act, + conv_kernel_size=self.conv_kernel_size, + output_stride=self.output_stride, + hidden_dropout_prob=self.hidden_dropout_prob, + attention_probs_dropout_prob=self.attention_probs_dropout_prob, + classifier_dropout_prob=self.classifier_dropout_prob, + initializer_range=self.initializer_range, + ) + + def create_and_check_model(self, config, pixel_values, labels, pixel_labels): + model = TFMobileViTModel(config=config) + result = model(pixel_values, training=False) + expected_height = expected_width = self.image_size // self.output_stride + self.parent.assertEqual( + result.last_hidden_state.shape, (self.batch_size, self.last_hidden_size, expected_height, expected_width) + ) + + def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels): + config.num_labels = self.num_labels + model = TFMobileViTForImageClassification(config) + result = model(pixel_values, labels=labels, training=False) + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) + + def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels): + config.num_labels = self.num_labels + model = TFMobileViTForSemanticSegmentation(config) + expected_height = expected_width = self.image_size // self.output_stride + + result = model(pixel_values, training=False) + self.parent.assertEqual( + result.logits.shape, (self.batch_size, self.num_labels, expected_height, expected_width) + ) + + result = model(pixel_values, labels=pixel_labels, training=False) + self.parent.assertEqual( + result.logits.shape, (self.batch_size, self.num_labels, expected_height, expected_width) + ) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + config, pixel_values, labels, pixel_labels = config_and_inputs + inputs_dict = {"pixel_values": pixel_values} + return config, inputs_dict + + +@require_tf +class MobileViTModelTest(TFModelTesterMixin, unittest.TestCase): + """ + Here we also overwrite some of the tests of test_modeling_common.py, as MobileViT does not use input_ids, inputs_embeds, + attention_mask and seq_length. + """ + + all_model_classes = ( + (TFMobileViTModel, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation) + if is_tf_available() + else () + ) + + test_pruning = False + test_resize_embeddings = False + test_head_masking = False + has_attentions = False + test_onnx = False + + def setUp(self): + self.model_tester = TFMobileViTModelTester(self) + self.config_tester = TFMobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False) + + def test_config(self): + self.config_tester.run_common_tests() + + @unittest.skip(reason="MobileViT does not use inputs_embeds") + def test_inputs_embeds(self): + pass + + @unittest.skip(reason="MobileViT does not support input and output embeddings") + def test_model_common_attributes(self): + pass + + @unittest.skip(reason="MobileViT does not output attentions") + def test_attention_outputs(self): + pass + + @unittest.skip("Test was written for TF 1.x and isn't really relevant here") + def test_compile_tf_model(self): + pass + + def test_forward_signature(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + signature = inspect.signature(model.call) + # signature.parameters is an OrderedDict => so arg_names order is deterministic + arg_names = [*signature.parameters.keys()] + + expected_arg_names = ["pixel_values"] + self.assertListEqual(arg_names[:1], expected_arg_names) + + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + def test_hidden_states_output(self): + def check_hidden_states_output(inputs_dict, config, model_class): + model = model_class(config) + + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + hidden_states = outputs.hidden_states + + expected_num_stages = 5 + self.assertEqual(len(hidden_states), expected_num_stages) + + # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) + # with the width and height being successively divided by 2. + divisor = 2 + for i in range(len(hidden_states)): + self.assertListEqual( + list(hidden_states[i].shape[-2:]), + [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], + ) + divisor *= 2 + + self.assertEqual(self.model_tester.output_stride, divisor // 2) + + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + inputs_dict["output_hidden_states"] = True + check_hidden_states_output(inputs_dict, config, model_class) + + # check that output_hidden_states also work using config + del inputs_dict["output_hidden_states"] + config.output_hidden_states = True + + check_hidden_states_output(inputs_dict, config, model_class) + + def test_for_image_classification(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_for_image_classification(*config_and_inputs) + + def test_for_semantic_segmentation(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs) + + @unittest.skipIf( + not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, + reason="TF does not support backprop for grouped convolutions on CPU.", + ) + def test_dataset_conversion(self): + super().test_dataset_conversion() + + def check_keras_fit_results(self, val_loss1, val_loss2, atol=2e-1, rtol=2e-1): + self.assertTrue(np.allclose(val_loss1, val_loss2, atol=atol, rtol=rtol)) + + @unittest.skipIf( + not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, + reason="TF does not support backprop for grouped convolutions on CPU.", + ) + def test_keras_fit(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + # Since `TFMobileViTModel` cannot operate with the default `fit()` method. + if model_class.__name__ != "TFMobileViTModel": + model = model_class(config) + if getattr(model, "hf_compute_loss", None): + super().test_keras_fit() + + # The default test_loss_computation() uses -100 as a proxy ignore_index + # to test masked losses. Overridding to avoid -100 since semantic segmentation + # models use `semantic_loss_ignore_index` from the config. + def test_loss_computation(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + for model_class in self.all_model_classes: + # set an ignore index to correctly test the masked loss used in + # `TFMobileViTForSemanticSegmentation`. + if model_class.__name__ != "TFMobileViTForSemanticSegmentation": + config.semantic_loss_ignore_index = 5 + + model = model_class(config) + if getattr(model, "hf_compute_loss", None): + # The number of elements in the loss should be the same as the number of elements in the label + prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) + added_label = prepared_for_class[ + sorted(list(prepared_for_class.keys() - inputs_dict.keys()), reverse=True)[0] + ] + expected_loss_size = added_label.shape.as_list()[:1] + + # Test that model correctly compute the loss with kwargs + prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) + possible_input_names = {"input_ids", "pixel_values", "input_features"} + input_name = possible_input_names.intersection(set(prepared_for_class)).pop() + model_input = prepared_for_class.pop(input_name) + + loss = model(model_input, **prepared_for_class)[0] + self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) + + # Test that model correctly compute the loss when we mask some positions + prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) + possible_input_names = {"input_ids", "pixel_values", "input_features"} + input_name = possible_input_names.intersection(set(prepared_for_class)).pop() + model_input = prepared_for_class.pop(input_name) + if "labels" in prepared_for_class: + labels = prepared_for_class["labels"].numpy() + if len(labels.shape) > 1 and labels.shape[1] != 1: + # labels[0] = -100 + prepared_for_class["labels"] = tf.convert_to_tensor(labels) + loss = model(model_input, **prepared_for_class)[0] + self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) + self.assertTrue(not np.any(np.isnan(loss.numpy()))) + + # Test that model correctly compute the loss with a dict + prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) + loss = model(prepared_for_class)[0] + self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) + + # Test that model correctly compute the loss with a tuple + prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) + + # Get keys that were added with the _prepare_for_class function + label_keys = prepared_for_class.keys() - inputs_dict.keys() + signature = inspect.signature(model.call).parameters + signature_names = list(signature.keys()) + + # Create a dictionary holding the location of the tensors in the tuple + tuple_index_mapping = {0: input_name} + for label_key in label_keys: + label_key_index = signature_names.index(label_key) + tuple_index_mapping[label_key_index] = label_key + sorted_tuple_index_mapping = sorted(tuple_index_mapping.items()) + # Initialize a list with their default values, update the values and convert to a tuple + list_input = [] + + for name in signature_names: + if name != "kwargs": + list_input.append(signature[name].default) + + for index, value in sorted_tuple_index_mapping: + list_input[index] = prepared_for_class[value] + + tuple_input = tuple(list_input) + + # Send to model + loss = model(tuple_input[:-1])[0] + + self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) + + @slow + def test_model_from_pretrained(self): + for model_name in TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: + model = TFMobileViTModel.from_pretrained(model_name) + self.assertIsNotNone(model) + + +# We will verify our results on an image of cute cats +def prepare_img(): + image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") + return image + + +@require_tf +class TFMobileViTModelIntegrationTest(unittest.TestCase): + @slow + def test_inference_image_classification_head(self): + model = TFMobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small") + + feature_extractor = MobileViTFeatureExtractor.from_pretrained("apple/mobilevit-xx-small") + image = prepare_img() + inputs = feature_extractor(images=image, return_tensors="tf") + + # forward pass + outputs = model(**inputs, training=False) + + # verify the logits + expected_shape = tf.TensorShape((1, 1000)) + self.assertEqual(outputs.logits.shape, expected_shape) + + expected_slice = tf.constant([-1.9364, -1.2327, -0.4653]) + + tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4, rtol=1e-04) + + @slow + def test_inference_semantic_segmentation(self): + # `from_pt` will be removed + model = TFMobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small") + + feature_extractor = MobileViTFeatureExtractor.from_pretrained("apple/deeplabv3-mobilevit-xx-small") + + image = prepare_img() + inputs = feature_extractor(images=image, return_tensors="tf") + + # forward pass + outputs = model(inputs.pixel_values, training=False) + logits = outputs.logits + + # verify the logits + expected_shape = tf.TensorShape((1, 21, 32, 32)) + self.assertEqual(logits.shape, expected_shape) + + expected_slice = tf.constant( + [ + [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]], + [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]], + [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]], + ] + ) + + tf.debugging.assert_near(logits[0, :3, :3, :3], expected_slice, rtol=1e-4, atol=1e-4) diff --git a/utils/documentation_tests.txt b/utils/documentation_tests.txt index c8ef3a07f7a6e8..abe539cf4613b5 100644 --- a/utils/documentation_tests.txt +++ b/utils/documentation_tests.txt @@ -51,6 +51,7 @@ src/transformers/models/mbart/modeling_mbart.py src/transformers/models/mobilebert/modeling_mobilebert.py src/transformers/models/mobilebert/modeling_tf_mobilebert.py src/transformers/models/mobilevit/modeling_mobilevit.py +src/transformers/models/mobilevit/modeling_tf_mobilevit.py src/transformers/models/opt/modeling_opt.py src/transformers/models/opt/modeling_tf_opt.py src/transformers/models/owlvit/modeling_owlvit.py From 1c381f36001fcd983d0cafc9762a8154e74ab41b Mon Sep 17 00:00:00 2001 From: Cody Yu Date: Thu, 1 Sep 2022 05:45:33 -1000 Subject: [PATCH 194/539] Cache results of is_torch_tpu_available() (#18777) * Cache results of is_torch_tpu_available() * Update src/transformers/utils/import_utils.py * Update src/transformers/utils/import_utils.py --- src/transformers/utils/import_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index 219552976a0a6c..d0dff20a9719d2 100644 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -21,7 +21,7 @@ import sys import warnings from collections import OrderedDict -from functools import wraps +from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any @@ -414,6 +414,7 @@ def is_ftfy_available(): return _ftfy_available +@lru_cache() def is_torch_tpu_available(check_device=True): "Checks if `torch_xla` is installed and potentially if a TPU is in the environment" if not _torch_available: From c61f116b639ef7ca1ada8ca06822d0bfb50c3890 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Thu, 1 Sep 2022 12:06:56 -0400 Subject: [PATCH 195/539] Tie weights after preparing the model in run_clm (#18855) --- examples/pytorch/language-modeling/run_clm_no_trainer.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/pytorch/language-modeling/run_clm_no_trainer.py b/examples/pytorch/language-modeling/run_clm_no_trainer.py index f5ea78f8328967..dee0fee8a070e7 100755 --- a/examples/pytorch/language-modeling/run_clm_no_trainer.py +++ b/examples/pytorch/language-modeling/run_clm_no_trainer.py @@ -477,10 +477,6 @@ def group_texts(examples): ] optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate) - # On TPU, the tie weights in our model have been disconnected, so we need to restore the ties. - if accelerator.distributed_type == DistributedType.TPU: - model.tie_weights() - # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) @@ -500,6 +496,10 @@ def group_texts(examples): model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) + # On TPU, the tie weights in our model have been disconnected, so we need to restore the ties. + if accelerator.distributed_type == DistributedType.TPU: + model.tie_weights() + # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: From ddb69e5af80914d25c10217f06a70326f2391d54 Mon Sep 17 00:00:00 2001 From: OlivierDehaene Date: Thu, 1 Sep 2022 18:07:14 +0200 Subject: [PATCH 196/539] Add Image To Text Generation pipeline (#18821) * Add Image2TextGenerationPipeline to supported pipelines * Add Flax and Tensorflow support * Add Flax and Tensorflow small tests * Add default model for Tensorflow * Add docstring * Fix doc style * Add tiny models for pytorch and flax * Remove flax from pipeline. Fix tests * Use ydshieh/vit-gpt2-coco-en as a default for both PyTorch and Tensorflow * Fix Tensorflow support Co-authored-by: Olivier Dehaene --- docs/source/en/main_classes/pipelines.mdx | 7 + src/transformers/__init__.py | 2 + src/transformers/pipelines/__init__.py | 17 +- .../pipelines/image2text_generation.py | 96 ++++++++++ .../test_pipelines_image2text_generation.py | 171 ++++++++++++++++++ 5 files changed, 292 insertions(+), 1 deletion(-) create mode 100644 src/transformers/pipelines/image2text_generation.py create mode 100644 tests/pipelines/test_pipelines_image2text_generation.py diff --git a/docs/source/en/main_classes/pipelines.mdx b/docs/source/en/main_classes/pipelines.mdx index 1dc76e67cda6a8..2ab730ef553821 100644 --- a/docs/source/en/main_classes/pipelines.mdx +++ b/docs/source/en/main_classes/pipelines.mdx @@ -29,6 +29,7 @@ There are two categories of pipeline abstractions to be aware about: - [`FillMaskPipeline`] - [`ImageClassificationPipeline`] - [`ImageSegmentationPipeline`] + - [`Image2TextGenerationPipeline`] - [`ObjectDetectionPipeline`] - [`QuestionAnsweringPipeline`] - [`SummarizationPipeline`] @@ -365,6 +366,12 @@ That should enable you to do all the custom code you want. - __call__ - all +### Image2TextGenerationPipeline + +[[autodoc]] Image2TextGenerationPipeline + - __call__ + - all + ### NerPipeline [[autodoc]] NerPipeline diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 8be2f73b9e7a55..98e7a7317d3c43 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -384,6 +384,7 @@ "CsvPipelineDataFormat", "FeatureExtractionPipeline", "FillMaskPipeline", + "Image2TextGenerationPipeline", "ImageClassificationPipeline", "ImageSegmentationPipeline", "JsonPipelineDataFormat", @@ -3191,6 +3192,7 @@ CsvPipelineDataFormat, FeatureExtractionPipeline, FillMaskPipeline, + Image2TextGenerationPipeline, ImageClassificationPipeline, ImageSegmentationPipeline, JsonPipelineDataFormat, diff --git a/src/transformers/pipelines/__init__.py b/src/transformers/pipelines/__init__.py index 74f6e796801c7e..0f29b4a971531d 100755 --- a/src/transformers/pipelines/__init__.py +++ b/src/transformers/pipelines/__init__.py @@ -53,6 +53,7 @@ from .conversational import Conversation, ConversationalPipeline from .feature_extraction import FeatureExtractionPipeline from .fill_mask import FillMaskPipeline +from .image2text_generation import Image2TextGenerationPipeline from .image_classification import ImageClassificationPipeline from .image_segmentation import ImageSegmentationPipeline from .object_detection import ObjectDetectionPipeline @@ -90,6 +91,7 @@ TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, + TFAutoModelForVision2Seq, ) if is_torch_available(): @@ -118,6 +120,7 @@ AutoModelForSpeechSeq2Seq, AutoModelForTableQuestionAnswering, AutoModelForTokenClassification, + AutoModelForVision2Seq, AutoModelForVisualQuestionAnswering, ) if TYPE_CHECKING: @@ -302,6 +305,18 @@ "default": {"model": {"pt": ("facebook/detr-resnet-50-panoptic", "fc15262")}}, "type": "image", }, + "image2text-generation": { + "impl": Image2TextGenerationPipeline, + "tf": (TFAutoModelForVision2Seq,) if is_tf_available() else (), + "pt": (AutoModelForVision2Seq,) if is_torch_available() else (), + "default": { + "model": { + "pt": ("ydshieh/vit-gpt2-coco-en", "65636df"), + "tf": ("ydshieh/vit-gpt2-coco-en", "65636df"), + } + }, + "type": "multimodal", + }, "object-detection": { "impl": ObjectDetectionPipeline, "tf": (), @@ -317,7 +332,7 @@ # any tokenizer/feature_extractor might be use for a given model so we cannot # use the statically defined TOKENIZER_MAPPING and FEATURE_EXTRACTOR_MAPPING to # see if the model defines such objects or not. -MULTI_MODEL_CONFIGS = {"VisionTextDualEncoderConfig", "SpeechEncoderDecoderConfig"} +MULTI_MODEL_CONFIGS = {"SpeechEncoderDecoderConfig", "VisionEncoderDecoderConfig", "VisionTextDualEncoderConfig"} for task, values in SUPPORTED_TASKS.items(): if values["type"] == "text": NO_FEATURE_EXTRACTOR_TASKS.add(task) diff --git a/src/transformers/pipelines/image2text_generation.py b/src/transformers/pipelines/image2text_generation.py new file mode 100644 index 00000000000000..22dad12a924cad --- /dev/null +++ b/src/transformers/pipelines/image2text_generation.py @@ -0,0 +1,96 @@ +from typing import List, Union + +from ..utils import ( + add_end_docstrings, + is_tf_available, + is_torch_available, + is_vision_available, + logging, + requires_backends, +) +from .base import PIPELINE_INIT_ARGS, Pipeline + + +if is_vision_available(): + from PIL import Image + + from ..image_utils import load_image + +if is_tf_available(): + from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING + +if is_torch_available(): + from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING + +logger = logging.get_logger(__name__) + + +@add_end_docstrings(PIPELINE_INIT_ARGS) +class Image2TextGenerationPipeline(Pipeline): + """ + Image2Text Generation pipeline using a `AutoModelForVision2Seq`. This pipeline predicts a caption for a given + image. + + This image to text generation pipeline can currently be loaded from pipeline() using the following task identifier: + "image2text-generation". + + See the list of available models on + [huggingface.co/models](https://huggingface.co/models?pipeline_tag=image-to-text). + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + requires_backends(self, "vision") + self.check_model_type( + TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING + ) + + def _sanitize_parameters(self, **kwargs): + return {}, {}, {} + + def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs): + """ + Assign labels to the image(s) passed as inputs. + + Args: + images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): + The pipeline handles three types of images: + + - A string containing a HTTP(s) link pointing to an image + - A string containing a local path to an image + - An image loaded in PIL directly + + The pipeline accepts either a single image or a batch of images. + + Return: + A list or a list of list of `dict`: Each result comes as a dictionary with the following key: + + - **generated_text** (`str`) -- The generated text. + """ + return super().__call__(images, **kwargs) + + def preprocess(self, image): + image = load_image(image) + model_inputs = self.feature_extractor(images=image, return_tensors=self.framework) + return model_inputs + + def _forward(self, model_inputs): + # FIXME: We need to pop here due to a difference in how `generation_utils.py` and `generation_tf_utils.py` + # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas + # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` + # in the `_prepare_model_inputs` method. + inputs = model_inputs.pop(self.model.main_input_name) + model_outputs = self.model.generate(inputs, **model_inputs) + return model_outputs + + def postprocess(self, model_outputs): + records = [] + for output_ids in model_outputs: + record = { + "generated_text": self.tokenizer.decode( + output_ids, + skip_special_tokens=True, + ) + } + records.append(record) + return records diff --git a/tests/pipelines/test_pipelines_image2text_generation.py b/tests/pipelines/test_pipelines_image2text_generation.py new file mode 100644 index 00000000000000..ce0a3d64faea57 --- /dev/null +++ b/tests/pipelines/test_pipelines_image2text_generation.py @@ -0,0 +1,171 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +from transformers import MODEL_FOR_VISION_2_SEQ_MAPPING, TF_MODEL_FOR_VISION_2_SEQ_MAPPING, is_vision_available +from transformers.pipelines import pipeline +from transformers.testing_utils import is_pipeline_test, require_tf, require_torch, require_vision, slow + +from .test_pipelines_common import ANY, PipelineTestCaseMeta + + +if is_vision_available(): + from PIL import Image +else: + + class Image: + @staticmethod + def open(*args, **kwargs): + pass + + +@is_pipeline_test +@require_vision +class Image2TextGenerationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): + model_mapping = MODEL_FOR_VISION_2_SEQ_MAPPING + tf_model_mapping = TF_MODEL_FOR_VISION_2_SEQ_MAPPING + + def get_test_pipeline(self, model, tokenizer, feature_extractor): + pipe = pipeline("image2text-generation", model=model, tokenizer=tokenizer, feature_extractor=feature_extractor) + examples = [ + Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), + "./tests/fixtures/tests_samples/COCO/000000039769.png", + ] + return pipe, examples + + def run_pipeline_test(self, pipe, examples): + outputs = pipe(examples) + self.assertEqual( + outputs, + [ + [{"generated_text": ANY(str)}], + [{"generated_text": ANY(str)}], + ], + ) + + @require_tf + def test_small_model_tf(self): + pipe = pipeline("image2text-generation", model="hf-internal-testing/tiny-random-vit-gpt2") + image = "./tests/fixtures/tests_samples/COCO/000000039769.png" + + outputs = pipe(image) + self.assertEqual( + outputs, + [ + { + "generated_text": ( + " intermedi intermedi intermedi intermedi intermedi " + "explorer explorer explorer explorer explorer explorer " + "explorer medicine medicine medicine medicine medicine " + "medicine medicine" + ) + }, + ], + ) + + outputs = pipe([image, image]) + self.assertEqual( + outputs, + [ + [ + { + "generated_text": ( + " intermedi intermedi intermedi intermedi intermedi " + "explorer explorer explorer explorer explorer explorer " + "explorer medicine medicine medicine medicine medicine " + "medicine medicine" + ) + }, + ], + [ + { + "generated_text": ( + " intermedi intermedi intermedi intermedi intermedi " + "explorer explorer explorer explorer explorer explorer " + "explorer medicine medicine medicine medicine medicine " + "medicine medicine" + ) + }, + ], + ], + ) + + @require_torch + def test_small_model_pt(self): + pipe = pipeline("image2text-generation", model="hf-internal-testing/tiny-random-vit-gpt2") + image = "./tests/fixtures/tests_samples/COCO/000000039769.png" + + outputs = pipe(image) + self.assertEqual( + outputs, + [ + { + "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" + }, + ], + ) + + outputs = pipe([image, image]) + self.assertEqual( + outputs, + [ + [ + { + "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" + } + ], + [ + { + "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" + } + ], + ], + ) + + @slow + @require_torch + def test_large_model_pt(self): + pipe = pipeline("image2text-generation", model="ydshieh/vit-gpt2-coco-en") + image = "./tests/fixtures/tests_samples/COCO/000000039769.png" + + outputs = pipe(image) + self.assertEqual(outputs, [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}]) + + outputs = pipe([image, image]) + self.assertEqual( + outputs, + [ + [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], + [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], + ], + ) + + @slow + @require_tf + def test_large_model_tf(self): + pipe = pipeline("image2text-generation", model="ydshieh/vit-gpt2-coco-en") + image = "./tests/fixtures/tests_samples/COCO/000000039769.png" + + outputs = pipe(image) + self.assertEqual(outputs, [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}]) + + outputs = pipe([image, image]) + self.assertEqual( + outputs, + [ + [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], + [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], + ], + ) From 23fab60b678e2124f0c0bbbaa1c605b8789cb227 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Thu, 1 Sep 2022 09:52:33 -0700 Subject: [PATCH 197/539] Pin revision for LayoutLMForQuestionAnswering and TFLayoutLMForQuestionAnswering tests (#18854) * Pin revision for tests * Fixup * Update revision in models * Shorten revisions Co-authored-by: Ankur Goyal --- src/transformers/models/layoutlm/modeling_layoutlm.py | 2 +- src/transformers/models/layoutlm/modeling_tf_layoutlm.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/transformers/models/layoutlm/modeling_layoutlm.py b/src/transformers/models/layoutlm/modeling_layoutlm.py index 0b1970210e57bc..c7ccd61962dac0 100644 --- a/src/transformers/models/layoutlm/modeling_layoutlm.py +++ b/src/transformers/models/layoutlm/modeling_layoutlm.py @@ -1285,7 +1285,7 @@ def forward( >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("impira/layoutlm-document-qa", add_prefix_space=True) - >>> model = LayoutLMForQuestionAnswering.from_pretrained("impira/layoutlm-document-qa") + >>> model = LayoutLMForQuestionAnswering.from_pretrained("impira/layoutlm-document-qa", revision="1e3ebac") >>> dataset = load_dataset("nielsr/funsd", split="train") >>> example = dataset[0] diff --git a/src/transformers/models/layoutlm/modeling_tf_layoutlm.py b/src/transformers/models/layoutlm/modeling_tf_layoutlm.py index 8c28651ab8f72c..a166b1709b5643 100644 --- a/src/transformers/models/layoutlm/modeling_tf_layoutlm.py +++ b/src/transformers/models/layoutlm/modeling_tf_layoutlm.py @@ -1448,7 +1448,7 @@ def call( >>> from datasets import load_dataset >>> tokenizer = AutoTokenizer.from_pretrained("impira/layoutlm-document-qa", add_prefix_space=True) - >>> model = TFLayoutLMForQuestionAnswering.from_pretrained("impira/layoutlm-document-qa") + >>> model = TFLayoutLMForQuestionAnswering.from_pretrained("impira/layoutlm-document-qa", revision="1e3ebac") >>> dataset = load_dataset("nielsr/funsd", split="train") >>> example = dataset[0] From 142e12afb457e31ec0e7323d9eba7b8bfee46c57 Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Thu, 1 Sep 2022 13:19:11 -0700 Subject: [PATCH 198/539] Split docs on modality (#18205) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * update * 🖍 add missing files * 📝 add nested sections * 🖍 align titles with tasks * oops * remove quotes from titles --- docs/source/en/_toctree.yml | 115 ++++++++++++++++++++---------------- 1 file changed, 63 insertions(+), 52 deletions(-) diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 78137d2c8a74c1..4543a6c066b387 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -21,44 +21,55 @@ title: Share a model title: Tutorials - sections: - - local: fast_tokenizers - title: Use tokenizers from 🤗 Tokenizers - - local: create_a_model - title: Create a custom architecture - - local: custom_models - title: Sharing custom models - sections: - - local: tasks/sequence_classification - title: Text classification - - local: tasks/token_classification - title: Token classification - - local: tasks/question_answering - title: Question answering - - local: tasks/language_modeling - title: Language modeling - - local: tasks/translation - title: Translation - - local: tasks/summarization - title: Summarization - - local: tasks/multiple_choice - title: Multiple choice + - local: create_a_model + title: Create a custom architecture + - local: custom_models + title: Sharing custom models + - local: run_scripts + title: Train with a script + - local: sagemaker + title: Run training on Amazon SageMaker + - local: converting_tensorflow_models + title: Converting TensorFlow Checkpoints + - local: serialization + title: Export 🤗 Transformers models + - local: troubleshooting + title: Troubleshoot + title: General usage + - sections: + - local: fast_tokenizers + title: Use tokenizers from 🤗 Tokenizers + - local: multilingual + title: Inference for multilingual models + - sections: + - local: tasks/sequence_classification + title: Text classification + - local: tasks/token_classification + title: Token classification + - local: tasks/question_answering + title: Question answering + - local: tasks/language_modeling + title: Language modeling + - local: tasks/translation + title: Translation + - local: tasks/summarization + title: Summarization + - local: tasks/multiple_choice + title: Multiple choice + title: Task guides + isExpanded: false + title: Natural Language Processing + - sections: - local: tasks/audio_classification title: Audio classification - local: tasks/asr title: Automatic speech recognition + title: Audio + - sections: - local: tasks/image_classification title: Image classification - title: Fine-tune for downstream tasks - - local: run_scripts - title: Train with a script - - local: sagemaker - title: Run training on Amazon SageMaker - - local: multilingual - title: Inference for multilingual models - - local: converting_tensorflow_models - title: Converting TensorFlow Checkpoints - - local: serialization - title: Export 🤗 Transformers models + title: Computer Vision - sections: - local: performance title: Overview @@ -84,31 +95,31 @@ title: Inference on Specialized Hardware - local: perf_hardware title: Custom hardware for training + - local: big_models + title: Instantiating a big model + - local: debugging + title: Debugging title: Performance and scalability - - local: big_models - title: Instantiating a big model + - sections: + - local: contributing + title: How to contribute to transformers? + - local: add_new_model + title: How to add a model to 🤗 Transformers? + - local: add_new_pipeline + title: How to add a pipeline to 🤗 Transformers? + - local: testing + title: Testing + - local: pr_checks + title: Checks on a Pull Request + title: Contribute + - local: notebooks + title: 🤗 Transformers Notebooks + - local: community + title: Community resources - local: benchmarks title: Benchmarks - local: migration title: Migrating from previous packages - - local: troubleshooting - title: Troubleshoot - - local: debugging - title: Debugging - - local: notebooks - title: 🤗 Transformers Notebooks - - local: community - title: Community - - local: contributing - title: How to contribute to transformers? - - local: add_new_model - title: How to add a model to 🤗 Transformers? - - local: add_new_pipeline - title: How to create a custom pipeline? - - local: testing - title: Testing - - local: pr_checks - title: Checks on a Pull Request title: How-to guides - sections: - local: philosophy @@ -478,4 +489,4 @@ - local: internal/file_utils title: General Utilities title: Internal Helpers - title: API + title: API \ No newline at end of file From 9b3eb81014dd3be8c8934942900422eaea9f0de5 Mon Sep 17 00:00:00 2001 From: kmckiern Date: Fri, 2 Sep 2022 04:46:31 -0700 Subject: [PATCH 199/539] if learning rate is a tensor, get item (float) (#18861) --- src/transformers/trainer_pt_utils.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/transformers/trainer_pt_utils.py b/src/transformers/trainer_pt_utils.py index 57103b50d5a039..7baa7a46e95932 100644 --- a/src/transformers/trainer_pt_utils.py +++ b/src/transformers/trainer_pt_utils.py @@ -837,6 +837,8 @@ def _get_learning_rate(self): if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.4") else self.lr_scheduler.get_lr()[0] ) + if torch.is_tensor(last_lr): + last_lr = last_lr.item() return last_lr From 129d73294ef2383369af135a4b9f10940a9de3c1 Mon Sep 17 00:00:00 2001 From: OlivierDehaene Date: Fri, 2 Sep 2022 13:55:30 +0200 Subject: [PATCH 200/539] Fix naming issue with ImageToText pipeline (#18864) Co-authored-by: Olivier Dehaene --- docs/source/en/main_classes/pipelines.mdx | 6 +++--- src/transformers/__init__.py | 4 ++-- src/transformers/pipelines/__init__.py | 6 +++--- .../{image2text_generation.py => image_to_text.py} | 9 ++++----- ...generation.py => test_pipelines_image_to_text.py} | 12 ++++++------ 5 files changed, 18 insertions(+), 19 deletions(-) rename src/transformers/pipelines/{image2text_generation.py => image_to_text.py} (90%) rename tests/pipelines/{test_pipelines_image2text_generation.py => test_pipelines_image_to_text.py} (90%) diff --git a/docs/source/en/main_classes/pipelines.mdx b/docs/source/en/main_classes/pipelines.mdx index 2ab730ef553821..b2de7e048dd5aa 100644 --- a/docs/source/en/main_classes/pipelines.mdx +++ b/docs/source/en/main_classes/pipelines.mdx @@ -29,7 +29,7 @@ There are two categories of pipeline abstractions to be aware about: - [`FillMaskPipeline`] - [`ImageClassificationPipeline`] - [`ImageSegmentationPipeline`] - - [`Image2TextGenerationPipeline`] + - [`ImageToTextPipeline`] - [`ObjectDetectionPipeline`] - [`QuestionAnsweringPipeline`] - [`SummarizationPipeline`] @@ -366,9 +366,9 @@ That should enable you to do all the custom code you want. - __call__ - all -### Image2TextGenerationPipeline +### ImageToTextPipeline -[[autodoc]] Image2TextGenerationPipeline +[[autodoc]] ImageToTextPipeline - __call__ - all diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 98e7a7317d3c43..b952ff085f4a5f 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -384,9 +384,9 @@ "CsvPipelineDataFormat", "FeatureExtractionPipeline", "FillMaskPipeline", - "Image2TextGenerationPipeline", "ImageClassificationPipeline", "ImageSegmentationPipeline", + "ImageToTextPipeline", "JsonPipelineDataFormat", "NerPipeline", "ObjectDetectionPipeline", @@ -3192,9 +3192,9 @@ CsvPipelineDataFormat, FeatureExtractionPipeline, FillMaskPipeline, - Image2TextGenerationPipeline, ImageClassificationPipeline, ImageSegmentationPipeline, + ImageToTextPipeline, JsonPipelineDataFormat, NerPipeline, ObjectDetectionPipeline, diff --git a/src/transformers/pipelines/__init__.py b/src/transformers/pipelines/__init__.py index 0f29b4a971531d..ee7dee57c0e9d2 100755 --- a/src/transformers/pipelines/__init__.py +++ b/src/transformers/pipelines/__init__.py @@ -53,9 +53,9 @@ from .conversational import Conversation, ConversationalPipeline from .feature_extraction import FeatureExtractionPipeline from .fill_mask import FillMaskPipeline -from .image2text_generation import Image2TextGenerationPipeline from .image_classification import ImageClassificationPipeline from .image_segmentation import ImageSegmentationPipeline +from .image_to_text import ImageToTextPipeline from .object_detection import ObjectDetectionPipeline from .question_answering import QuestionAnsweringArgumentHandler, QuestionAnsweringPipeline from .table_question_answering import TableQuestionAnsweringArgumentHandler, TableQuestionAnsweringPipeline @@ -305,8 +305,8 @@ "default": {"model": {"pt": ("facebook/detr-resnet-50-panoptic", "fc15262")}}, "type": "image", }, - "image2text-generation": { - "impl": Image2TextGenerationPipeline, + "image-to-text": { + "impl": ImageToTextPipeline, "tf": (TFAutoModelForVision2Seq,) if is_tf_available() else (), "pt": (AutoModelForVision2Seq,) if is_torch_available() else (), "default": { diff --git a/src/transformers/pipelines/image2text_generation.py b/src/transformers/pipelines/image_to_text.py similarity index 90% rename from src/transformers/pipelines/image2text_generation.py rename to src/transformers/pipelines/image_to_text.py index 22dad12a924cad..934525533ef8cb 100644 --- a/src/transformers/pipelines/image2text_generation.py +++ b/src/transformers/pipelines/image_to_text.py @@ -26,13 +26,12 @@ @add_end_docstrings(PIPELINE_INIT_ARGS) -class Image2TextGenerationPipeline(Pipeline): +class ImageToTextPipeline(Pipeline): """ - Image2Text Generation pipeline using a `AutoModelForVision2Seq`. This pipeline predicts a caption for a given - image. + Image To Text pipeline using a `AutoModelForVision2Seq`. This pipeline predicts a caption for a given image. - This image to text generation pipeline can currently be loaded from pipeline() using the following task identifier: - "image2text-generation". + This image to text pipeline can currently be loaded from pipeline() using the following task identifier: + "image-to-text". See the list of available models on [huggingface.co/models](https://huggingface.co/models?pipeline_tag=image-to-text). diff --git a/tests/pipelines/test_pipelines_image2text_generation.py b/tests/pipelines/test_pipelines_image_to_text.py similarity index 90% rename from tests/pipelines/test_pipelines_image2text_generation.py rename to tests/pipelines/test_pipelines_image_to_text.py index ce0a3d64faea57..897c3b2e47250d 100644 --- a/tests/pipelines/test_pipelines_image2text_generation.py +++ b/tests/pipelines/test_pipelines_image_to_text.py @@ -33,12 +33,12 @@ def open(*args, **kwargs): @is_pipeline_test @require_vision -class Image2TextGenerationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): +class ImageToTextPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): model_mapping = MODEL_FOR_VISION_2_SEQ_MAPPING tf_model_mapping = TF_MODEL_FOR_VISION_2_SEQ_MAPPING def get_test_pipeline(self, model, tokenizer, feature_extractor): - pipe = pipeline("image2text-generation", model=model, tokenizer=tokenizer, feature_extractor=feature_extractor) + pipe = pipeline("image-to-text", model=model, tokenizer=tokenizer, feature_extractor=feature_extractor) examples = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "./tests/fixtures/tests_samples/COCO/000000039769.png", @@ -57,7 +57,7 @@ def run_pipeline_test(self, pipe, examples): @require_tf def test_small_model_tf(self): - pipe = pipeline("image2text-generation", model="hf-internal-testing/tiny-random-vit-gpt2") + pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-vit-gpt2") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) @@ -104,7 +104,7 @@ def test_small_model_tf(self): @require_torch def test_small_model_pt(self): - pipe = pipeline("image2text-generation", model="hf-internal-testing/tiny-random-vit-gpt2") + pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-vit-gpt2") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) @@ -137,7 +137,7 @@ def test_small_model_pt(self): @slow @require_torch def test_large_model_pt(self): - pipe = pipeline("image2text-generation", model="ydshieh/vit-gpt2-coco-en") + pipe = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) @@ -155,7 +155,7 @@ def test_large_model_pt(self): @slow @require_tf def test_large_model_tf(self): - pipe = pipeline("image2text-generation", model="ydshieh/vit-gpt2-coco-en") + pipe = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) From c60dd98e87373e7f0f5af29f3d49411c2e81fb69 Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Fri, 2 Sep 2022 14:48:19 +0200 Subject: [PATCH 201/539] [LayoutLM] Add clarification to docs (#18716) * Add clarification * Add another clarification * Apply suggestion Co-authored-by: Niels Rogge --- docs/source/en/model_doc/layoutlm.mdx | 3 ++- .../models/layoutlmv2/feature_extraction_layoutlmv2.py | 3 ++- .../models/layoutlmv3/feature_extraction_layoutlmv3.py | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/docs/source/en/model_doc/layoutlm.mdx b/docs/source/en/model_doc/layoutlm.mdx index e463c67d91a03a..257a4600c8ccf7 100644 --- a/docs/source/en/model_doc/layoutlm.mdx +++ b/docs/source/en/model_doc/layoutlm.mdx @@ -67,7 +67,8 @@ occurs. Those can be obtained using the Python Image Library (PIL) library for e ```python from PIL import Image -image = Image.open("name_of_your_document - can be a png file, pdf, etc.") +# Document can be a png, jpg, etc. PDFs must be converted to images. +image = Image.open(name_of_your_document).convert("RGB") width, height = image.size ``` diff --git a/src/transformers/models/layoutlmv2/feature_extraction_layoutlmv2.py b/src/transformers/models/layoutlmv2/feature_extraction_layoutlmv2.py index cd05819e479a91..5ccf4d7842347a 100644 --- a/src/transformers/models/layoutlmv2/feature_extraction_layoutlmv2.py +++ b/src/transformers/models/layoutlmv2/feature_extraction_layoutlmv2.py @@ -168,7 +168,8 @@ def __call__( >>> from transformers import LayoutLMv2FeatureExtractor >>> from PIL import Image - >>> image = Image.open("name_of_your_document - can be a png file, pdf, etc.").convert("RGB") + >>> # Document can be a png, jpg, etc. PDFs must be converted to images. + >>> image = Image.open(name_of_your_document).convert("RGB") >>> # option 1: with apply_ocr=True (default) >>> feature_extractor = LayoutLMv2FeatureExtractor() diff --git a/src/transformers/models/layoutlmv3/feature_extraction_layoutlmv3.py b/src/transformers/models/layoutlmv3/feature_extraction_layoutlmv3.py index 2d771a27903d3b..6d0d5d1a2673c3 100644 --- a/src/transformers/models/layoutlmv3/feature_extraction_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/feature_extraction_layoutlmv3.py @@ -179,7 +179,8 @@ def __call__( >>> from transformers import LayoutLMv3FeatureExtractor >>> from PIL import Image - >>> image = Image.open("name_of_your_document - can be a png file, pdf, etc.").convert("RGB") + >>> # Document can be a png, jpg, etc. PDFs must be converted to images. + >>> image = Image.open(name_of_your_document).convert("RGB") >>> # option 1: with apply_ocr=True (default) >>> feature_extractor = LayoutLMv3FeatureExtractor() From 17981faf6791c1549e7dcb62970c9c699e619ea7 Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Fri, 2 Sep 2022 15:59:25 +0200 Subject: [PATCH 202/539] Add OWL-ViT to the appropriate section (#18867) Co-authored-by: Niels Rogge --- docs/source/en/_toctree.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 4543a6c066b387..57b53c8aa88297 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -376,8 +376,6 @@ title: MaskFormer - local: model_doc/mobilevit title: MobileViT - - local: model_doc/owlvit - title: OWL-ViT - local: model_doc/poolformer title: PoolFormer - local: model_doc/regnet @@ -452,6 +450,8 @@ title: LayoutXLM - local: model_doc/lxmert title: LXMERT + - local: model_doc/owlvit + title: OWL-ViT - local: model_doc/perceiver title: Perceiver - local: model_doc/speech-encoder-decoder From 38c3cd52fb6b39e2253d055ea583537efb29cd31 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Fri, 2 Sep 2022 10:30:06 -0400 Subject: [PATCH 203/539] Clean up utils.hub using the latest from hf_hub (#18857) * Clean up utils.hub using the latest from hf_hub * Adapt test * Address review comment * Fix test --- setup.py | 2 +- src/transformers/dependency_versions_table.py | 2 +- src/transformers/utils/hub.py | 88 +++++++------------ src/transformers/utils/logging.py | 4 + tests/utils/test_logging.py | 18 ++-- 5 files changed, 44 insertions(+), 70 deletions(-) diff --git a/setup.py b/setup.py index e974ff9a2b2b9a..8f101357e8ce8f 100644 --- a/setup.py +++ b/setup.py @@ -116,7 +116,7 @@ "fugashi>=1.0", "GitPython<3.1.19", "hf-doc-builder>=0.3.0", - "huggingface-hub>=0.8.1,<1.0", + "huggingface-hub>=0.9.0,<1.0", "importlib_metadata", "ipadic>=1.0.0,<2.0", "isort>=5.5.4", diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py index c8f0f18793c0a5..58e4a2cd42c372 100644 --- a/src/transformers/dependency_versions_table.py +++ b/src/transformers/dependency_versions_table.py @@ -22,7 +22,7 @@ "fugashi": "fugashi>=1.0", "GitPython": "GitPython<3.1.19", "hf-doc-builder": "hf-doc-builder>=0.3.0", - "huggingface-hub": "huggingface-hub>=0.8.1,<1.0", + "huggingface-hub": "huggingface-hub>=0.9.0,<1.0", "importlib_metadata": "importlib_metadata", "ipadic": "ipadic>=1.0.0,<2.0", "isort": "isort>=5.5.4", diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py index 163ad64ffa173b..9b1e9a5b85eb02 100644 --- a/src/transformers/utils/hub.py +++ b/src/transformers/utils/hub.py @@ -21,7 +21,6 @@ import sys import traceback import warnings -from contextlib import contextmanager from pathlib import Path from typing import Dict, List, Optional, Tuple, Union from uuid import uuid4 @@ -39,7 +38,12 @@ ) from huggingface_hub.constants import HUGGINGFACE_HEADER_X_LINKED_ETAG, HUGGINGFACE_HEADER_X_REPO_COMMIT from huggingface_hub.file_download import REGEX_COMMIT_HASH -from huggingface_hub.utils import EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError +from huggingface_hub.utils import ( + EntryNotFoundError, + LocalEntryNotFoundError, + RepositoryNotFoundError, + RevisionNotFoundError, +) from requests.exceptions import HTTPError from transformers.utils.logging import tqdm @@ -249,28 +253,6 @@ def try_to_load_from_cache(cache_dir, repo_id, filename, revision=None, commit_h return cached_file if os.path.isfile(cached_file) else None -# If huggingface_hub changes the class of error for this to FileNotFoundError, we will be able to avoid that in the -# future. -LOCAL_FILES_ONLY_HF_ERROR = ( - "Cannot find the requested files in the disk cache and outgoing traffic has been disabled. To enable hf.co " - "look-ups and downloads online, set 'local_files_only' to False." -) - - -# In the future, this ugly contextmanager can be removed when huggingface_hub as a released version where we can -# activate/deactivate progress bars. -@contextmanager -def _patch_hf_hub_tqdm(): - """ - A context manager to make huggingface hub use the tqdm version of Transformers (which is controlled by some utils) - in logging. - """ - old_tqdm = huggingface_hub.file_download.tqdm - huggingface_hub.file_download.tqdm = tqdm - yield - huggingface_hub.file_download.tqdm = old_tqdm - - def cached_file( path_or_repo_id: Union[str, os.PathLike], filename: str, @@ -375,20 +357,19 @@ def cached_file( user_agent = http_user_agent(user_agent) try: # Load from URL or cache if already cached - with _patch_hf_hub_tqdm(): - resolved_file = hf_hub_download( - path_or_repo_id, - filename, - subfolder=None if len(subfolder) == 0 else subfolder, - revision=revision, - cache_dir=cache_dir, - user_agent=user_agent, - force_download=force_download, - proxies=proxies, - resume_download=resume_download, - use_auth_token=use_auth_token, - local_files_only=local_files_only, - ) + resolved_file = hf_hub_download( + path_or_repo_id, + filename, + subfolder=None if len(subfolder) == 0 else subfolder, + revision=revision, + cache_dir=cache_dir, + user_agent=user_agent, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + use_auth_token=use_auth_token, + local_files_only=local_files_only, + ) except RepositoryNotFoundError: raise EnvironmentError( @@ -403,6 +384,19 @@ def cached_file( "for this model name. Check the model page at " f"'https://huggingface.co/{path_or_repo_id}' for available revisions." ) + except LocalEntryNotFoundError: + # We try to see if we have a cached version (not up to date): + resolved_file = try_to_load_from_cache(cache_dir, path_or_repo_id, full_filename, revision=revision) + if resolved_file is not None: + return resolved_file + if not _raise_exceptions_for_missing_entries or not _raise_exceptions_for_connection_errors: + return None + raise EnvironmentError( + f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this file, couldn't find it in the" + f" cached files and it looks like {path_or_repo_id} is not the path to a directory containing a file named" + f" {full_filename}.\nCheckout your internet connection or see how to run the library in offline mode at" + " 'https://huggingface.co/docs/transformers/installation#offline-mode'." + ) except EntryNotFoundError: if not _raise_exceptions_for_missing_entries: return None @@ -421,24 +415,6 @@ def cached_file( return None raise EnvironmentError(f"There was a specific connection error when trying to load {path_or_repo_id}:\n{err}") - except ValueError as err: - # HuggingFace Hub returns a ValueError for a missing file when local_files_only=True we need to catch it here - # This could be caught above along in `EntryNotFoundError` if hf_hub sent a different error message here - if LOCAL_FILES_ONLY_HF_ERROR in err.args[0] and local_files_only and not _raise_exceptions_for_missing_entries: - return None - - # Otherwise we try to see if we have a cached version (not up to date): - resolved_file = try_to_load_from_cache(cache_dir, path_or_repo_id, full_filename, revision=revision) - if resolved_file is not None: - return resolved_file - if not _raise_exceptions_for_connection_errors: - return None - raise EnvironmentError( - f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this file, couldn't find it in the" - f" cached files and it looks like {path_or_repo_id} is not the path to a directory containing a file named" - f" {full_filename}.\nCheckout your internet connection or see how to run the library in offline mode at" - " 'https://huggingface.co/docs/transformers/installation#offline-mode'." - ) return resolved_file diff --git a/src/transformers/utils/logging.py b/src/transformers/utils/logging.py index 91ecca7cfcacd8..a98e2f30fd6eb8 100644 --- a/src/transformers/utils/logging.py +++ b/src/transformers/utils/logging.py @@ -30,6 +30,8 @@ from tqdm import auto as tqdm_lib +import huggingface_hub.utils as hf_hub_utils + _lock = threading.Lock() _default_handler: Optional[logging.Handler] = None @@ -336,9 +338,11 @@ def enable_progress_bar(): """Enable tqdm progress bar.""" global _tqdm_active _tqdm_active = True + hf_hub_utils.enable_progress_bars() def disable_progress_bar(): """Disable tqdm progress bar.""" global _tqdm_active _tqdm_active = False + hf_hub_utils.disable_progress_bars() diff --git a/tests/utils/test_logging.py b/tests/utils/test_logging.py index 81940d2d3bee9d..81f3d9144ad78f 100644 --- a/tests/utils/test_logging.py +++ b/tests/utils/test_logging.py @@ -14,10 +14,10 @@ import os import unittest -from unittest.mock import patch import transformers.models.bart.tokenization_bart -from transformers import AutoConfig, logging +from huggingface_hub.utils import are_progress_bars_disabled +from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar @@ -126,14 +126,8 @@ def test_advisory_warnings(self): def test_set_progress_bar_enabled(): - TINY_MODEL = "hf-internal-testing/tiny-random-distilbert" - with patch("tqdm.auto.tqdm") as mock_tqdm: - disable_progress_bar() - _ = AutoConfig.from_pretrained(TINY_MODEL, force_download=True) - mock_tqdm.assert_not_called() + disable_progress_bar() + assert are_progress_bars_disabled() - mock_tqdm.reset_mock() - - enable_progress_bar() - _ = AutoConfig.from_pretrained(TINY_MODEL, force_download=True) - mock_tqdm.assert_called() + enable_progress_bar() + assert not are_progress_bars_disabled() From 0ab465a5d2609f66281e239f80f1fff044061be1 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Fri, 2 Sep 2022 16:49:08 +0200 Subject: [PATCH 204/539] pin Slack SDK to 3.18.1 to avoid failing issue (#18869) Co-authored-by: ydshieh --- .github/workflows/doctests.yml | 2 +- .github/workflows/self-nightly-scheduled.yml | 2 +- .github/workflows/self-past.yml | 2 +- .github/workflows/self-push.yml | 2 +- .github/workflows/self-scheduled.yml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/doctests.yml b/.github/workflows/doctests.yml index 9fc74e2e6cf8c2..8a8968be52d582 100644 --- a/.github/workflows/doctests.yml +++ b/.github/workflows/doctests.yml @@ -74,5 +74,5 @@ jobs: CI_SLACK_CHANNEL_ID_DAILY: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY_DOCS }} CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }} run: | - pip install slack_sdk + pip install slack_sdk==3.18.1 python utils/notification_service_doc_tests.py diff --git a/.github/workflows/self-nightly-scheduled.yml b/.github/workflows/self-nightly-scheduled.yml index 371620c5438813..78b14463b31a9a 100644 --- a/.github/workflows/self-nightly-scheduled.yml +++ b/.github/workflows/self-nightly-scheduled.yml @@ -233,5 +233,5 @@ jobs: # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`. run: | - pip install slack_sdk + pip install slack_sdk==3.18.1 python utils/notification_service.py "${{ needs.setup.outputs.matrix }}" diff --git a/.github/workflows/self-past.yml b/.github/workflows/self-past.yml index b3871dc92fa47f..c9776899775e24 100644 --- a/.github/workflows/self-past.yml +++ b/.github/workflows/self-past.yml @@ -180,7 +180,7 @@ jobs: # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`. run: | - pip install slack_sdk + pip install slack_sdk==3.18.1 python utils/notification_service.py "${{ needs.setup.outputs.matrix }}" # Upload complete failure tables, as they might be big and only truncated versions could be sent to Slack. diff --git a/.github/workflows/self-push.yml b/.github/workflows/self-push.yml index d0efae8b479844..dd1222609d811c 100644 --- a/.github/workflows/self-push.yml +++ b/.github/workflows/self-push.yml @@ -527,5 +527,5 @@ jobs: # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`. run: | - pip install slack_sdk + pip install slack_sdk==3.18.1 python utils/notification_service.py "${{ needs.setup.outputs.matrix }}" diff --git a/.github/workflows/self-scheduled.yml b/.github/workflows/self-scheduled.yml index 323ca5eb54db23..4d8feba32a54cc 100644 --- a/.github/workflows/self-scheduled.yml +++ b/.github/workflows/self-scheduled.yml @@ -358,5 +358,5 @@ jobs: # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`. run: | - pip install slack_sdk + pip install slack_sdk==3.18.1 python utils/notification_service.py "${{ needs.setup.outputs.matrix }}" From 9e346f7436c04390cfbfa2287c854848d916befe Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Fri, 2 Sep 2022 10:49:39 -0400 Subject: [PATCH 205/539] Fix number of examples for iterable datasets in multiprocessing (#18856) * Fix number of examples for iterable datasets in multiprocessing * Add stronger check --- src/transformers/trainer.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index c1eab71fff03db..ae1b2458524817 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -3040,13 +3040,15 @@ def evaluation_loop( num_samples = len(eval_dataset) # The instance check is weird and does not actually check for the type, but whether the dataset has the right # methods. Therefore we need to make sure it also has the attribute. - elif isinstance(eval_dataset, IterableDatasetShard) and hasattr(eval_dataset, "num_examples"): + elif isinstance(eval_dataset, IterableDatasetShard) and getattr(eval_dataset, "num_examples", 0) > 0: num_samples = eval_dataset.num_examples else: if has_length(dataloader): num_samples = self.num_examples(dataloader) else: # both len(dataloader.dataset) and len(dataloader) fail num_samples = observed_num_examples + if num_samples == 0 and observed_num_examples > 0: + num_samples = observed_num_examples # Number of losses has been rounded to a multiple of batch_size and in a distributed training, the number of # samplers has been rounded to a multiple of batch_size, so we truncate. From c5be7cae59de52cb53c092cdbc6fcbaeb8e07d97 Mon Sep 17 00:00:00 2001 From: Stas Bekman Date: Fri, 2 Sep 2022 08:22:46 -0700 Subject: [PATCH 206/539] postpone bnb load until it's needed (#18859) --- src/transformers/modeling_utils.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index 04196633e1407f..4c9354d795b299 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -84,8 +84,6 @@ else: get_balanced_memory = None -if is_bitsandbytes_available(): - from .utils.bitsandbytes import get_key_to_not_convert, replace_8bit_linear, set_module_8bit_tensor_to_device logger = logging.get_logger(__name__) @@ -527,6 +525,9 @@ def _load_state_dict_into_meta_model( # - Is there a situation where some keys aren't in `loaded_state_dict_keys` and in which case # they won't get loaded. + if load_in_8bit: + from .utils.bitsandbytes import set_module_8bit_tensor_to_device + error_msgs = [] old_keys = [] @@ -2142,6 +2143,8 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P model = cls(config, *model_args, **model_kwargs) if load_in_8bit: + from .utils.bitsandbytes import get_key_to_not_convert, replace_8bit_linear + logger.info("Detected 8-bit loading: activating 8-bit loading for this model") # We never convert lm_head or any last modules for numerical stability reasons @@ -2279,6 +2282,9 @@ def _load_pretrained_model( dtype=None, load_in_8bit=False, ): + if load_in_8bit: + from .utils.bitsandbytes import set_module_8bit_tensor_to_device + if device_map is not None and "disk" in device_map.values(): if offload_folder is None: raise ValueError( From 9196f48b950abe5fa1533b9c8a985d9b28839df2 Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Fri, 2 Sep 2022 16:25:26 +0100 Subject: [PATCH 207/539] Generate: validate `model_kwargs` on TF (and catch typos in generate arguments) (#18651) --- src/transformers/generation_tf_utils.py | 30 ++- tests/generation/test_generation_tf_utils.py | 183 +++++++++++++++++++ tests/generation/test_generation_utils.py | 4 +- tests/test_modeling_tf_common.py | 136 -------------- 4 files changed, 214 insertions(+), 139 deletions(-) create mode 100644 tests/generation/test_generation_tf_utils.py diff --git a/src/transformers/generation_tf_utils.py b/src/transformers/generation_tf_utils.py index 64cecebbe84838..d5f92b51e722ec 100644 --- a/src/transformers/generation_tf_utils.py +++ b/src/transformers/generation_tf_utils.py @@ -579,6 +579,7 @@ def generate( do_sample = do_sample if do_sample is not None else self.config.do_sample if do_sample is False or num_beams == 1: + seed = model_kwargs.pop("seed", None) return self._generate( input_ids=input_ids, max_length=max_length, @@ -601,13 +602,14 @@ def generate( attention_mask=attention_mask, decoder_start_token_id=decoder_start_token_id, use_cache=use_cache, - seed=model_kwargs.pop("seed", None), + seed=seed, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, forced_bos_token_id=forced_bos_token_id, forced_eos_token_id=forced_eos_token_id, + **model_kwargs, ) # We cannot generate if the model does not have a LM head @@ -1288,6 +1290,29 @@ def adjust_logits_during_generation( else: return logits + def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]): + """Validates model kwargs for generation. Generate argument typos will also be caught here.""" + # Excludes arguments that are handled before calling any model function + if self.config.is_encoder_decoder: + for key in ["decoder_input_ids"]: + model_kwargs.pop(key, None) + + unused_model_args = [] + model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters) + # `kwargs` if often used to handle optional forward pass inputs like `attention_mask`. If + # `prepare_inputs_for_generation` doesn't accept `kwargs`, then a stricter check can be made ;) + if "kwargs" in model_args: + model_args |= set(inspect.signature(self.call).parameters) + for key, value in model_kwargs.items(): + if value is not None and key not in model_args: + unused_model_args.append(key) + + if unused_model_args: + raise ValueError( + f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the" + " generate arguments will also show up in this list)" + ) + def _generate( self, input_ids=None, @@ -1483,6 +1508,9 @@ def _generate( # generate sequences without allowing bad_words to be generated outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) ```""" + # 0. Validate model kwargs + self._validate_model_kwargs(model_kwargs.copy()) + # 1. Set generation parameters if not already defined length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping diff --git a/tests/generation/test_generation_tf_utils.py b/tests/generation/test_generation_tf_utils.py new file mode 100644 index 00000000000000..d0d284182b53c3 --- /dev/null +++ b/tests/generation/test_generation_tf_utils.py @@ -0,0 +1,183 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Team Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a clone of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import tempfile +import unittest + +from transformers import is_tf_available +from transformers.testing_utils import require_tf, slow + + +if is_tf_available(): + import tensorflow as tf + + from transformers import AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeq2SeqLM, tf_top_k_top_p_filtering + + +@require_tf +class UtilsFunctionsTest(unittest.TestCase): + + # tests whether the top_k_top_p_filtering function behaves as expected + def test_top_k_top_p_filtering(self): + logits = tf.convert_to_tensor( + [ + [ + 8.2220991, # 3rd highest value; idx. 0 + -0.5620044, + 5.23229752, + 4.0386393, + -6.8798378, + -0.54785802, + -3.2012153, + 2.92777176, + 1.88171953, + 7.35341276, # 5th highest value; idx. 9 + 8.43207833, # 2nd highest value; idx. 10 + -9.85711836, + -5.96209236, + -1.13039161, + -7.1115294, + -0.8369633, + -5.3186408, + 7.06427407, + 0.81369344, + -0.82023817, + -5.9179796, + 0.58813443, + -6.99778438, + 4.71551189, + -0.18771637, + 7.44020759, # 4th highest value; idx. 25 + 9.38450987, # 1st highest value; idx. 26 + 2.12662941, + -9.32562038, + 2.35652522, + ], # cummulative prob of 5 highest values <= 0.6 + [ + 0.58425518, + 4.53139238, + -5.57510464, + -6.28030699, + -7.19529503, + -4.02122551, + 1.39337037, + -6.06707057, + 1.59480517, + -9.643119, + 0.03907799, + 0.67231762, + -8.88206726, + 6.27115922, # 4th highest value; idx. 13 + 2.28520723, + 4.82767506, + 4.30421368, + 8.8275313, # 2nd highest value; idx. 17 + 5.44029958, # 5th highest value; idx. 18 + -4.4735794, + 7.38579536, # 3rd highest value; idx. 20 + -2.91051663, + 2.61946077, + -2.5674762, + -9.48959302, + -4.02922645, + -1.35416918, + 9.67702323, # 1st highest value; idx. 27 + -5.89478553, + 1.85370467, + ], # cummulative prob of 5 highest values <= 0.6 + ], + dtype=tf.float32, + ) + + non_inf_expected_idx = tf.convert_to_tensor( + [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]], + dtype=tf.int32, + ) # expected non filtered idx as noted above + + non_inf_expected_output = tf.convert_to_tensor( + [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023], + dtype=tf.float32, + ) # expected non filtered values as noted above + + output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4) + + non_inf_output = output[output != -float("inf")] + non_inf_idx = tf.cast( + tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))), + dtype=tf.int32, + ) + + tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12) + tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx) + + +@require_tf +class TFGenerationIntegrationTests(unittest.TestCase): + @slow + def test_generate_tf_function_export(self): + test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2") + max_length = 2 + + class DummyModel(tf.Module): + def __init__(self, model): + super(DummyModel, self).__init__() + self.model = model + + @tf.function( + input_signature=( + tf.TensorSpec((None, max_length), tf.int32, name="input_ids"), + tf.TensorSpec((None, max_length), tf.int32, name="attention_mask"), + ), + jit_compile=True, + ) + def serving(self, input_ids, attention_mask): + outputs = self.model.generate( + input_ids=input_ids, + attention_mask=attention_mask, + max_new_tokens=max_length, + return_dict_in_generate=True, + ) + return {"sequences": outputs["sequences"]} + + dummy_input_ids = [[2, 0], [102, 103]] + dummy_attention_masks = [[1, 0], [1, 1]] + dummy_model = DummyModel(model=test_model) + with tempfile.TemporaryDirectory() as tmp_dir: + tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving}) + serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"] + for batch_size in range(1, len(dummy_input_ids) + 1): + inputs = { + "input_ids": tf.constant(dummy_input_ids[:batch_size]), + "attention_mask": tf.constant(dummy_attention_masks[:batch_size]), + } + tf_func_outputs = serving_func(**inputs)["sequences"] + tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_length) + tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs) + + def test_validate_generation_inputs(self): + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5") + + encoder_input_str = "Hello world" + input_ids = tokenizer(encoder_input_str, return_tensors="tf").input_ids + + # typos are quickly detected (the correct argument is `do_sample`) + with self.assertRaisesRegex(ValueError, "do_samples"): + model.generate(input_ids, do_samples=True) + + # arbitrary arguments that will not be used anywhere are also not accepted + with self.assertRaisesRegex(ValueError, "foo"): + fake_model_kwargs = {"foo": "bar"} + model.generate(input_ids, **fake_model_kwargs) diff --git a/tests/generation/test_generation_utils.py b/tests/generation/test_generation_utils.py index 62a3f588cf471b..e8cb57ccf3c97d 100644 --- a/tests/generation/test_generation_utils.py +++ b/tests/generation/test_generation_utils.py @@ -2704,8 +2704,8 @@ def test_constrained_beam_search_mixin_type_checks(self): model.generate(input_ids, force_words_ids=[[[-1]]]) def test_validate_generation_inputs(self): - tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/t5-tiny-random") - model = AutoModelForSeq2SeqLM.from_pretrained("patrickvonplaten/t5-tiny-random") + tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") + model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5") encoder_input_str = "Hello world" input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids diff --git a/tests/test_modeling_tf_common.py b/tests/test_modeling_tf_common.py index a0413afdfa0ec9..f3608f4b225d86 100644 --- a/tests/test_modeling_tf_common.py +++ b/tests/test_modeling_tf_common.py @@ -75,11 +75,9 @@ TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, BertConfig, TFAutoModel, - TFAutoModelForCausalLM, TFAutoModelForSequenceClassification, TFBertModel, TFSharedEmbeddings, - tf_top_k_top_p_filtering, ) from transformers.generation_tf_utils import ( TFBeamSampleDecoderOnlyOutput, @@ -1824,100 +1822,6 @@ def floats_tensor(shape, scale=1.0, rng=None, name=None, dtype=None): @require_tf class UtilsFunctionsTest(unittest.TestCase): - - # tests whether the top_k_top_p_filtering function behaves as expected - def test_top_k_top_p_filtering(self): - logits = tf.convert_to_tensor( - [ - [ - 8.2220991, # 3rd highest value; idx. 0 - -0.5620044, - 5.23229752, - 4.0386393, - -6.8798378, - -0.54785802, - -3.2012153, - 2.92777176, - 1.88171953, - 7.35341276, # 5th highest value; idx. 9 - 8.43207833, # 2nd highest value; idx. 10 - -9.85711836, - -5.96209236, - -1.13039161, - -7.1115294, - -0.8369633, - -5.3186408, - 7.06427407, - 0.81369344, - -0.82023817, - -5.9179796, - 0.58813443, - -6.99778438, - 4.71551189, - -0.18771637, - 7.44020759, # 4th highest value; idx. 25 - 9.38450987, # 1st highest value; idx. 26 - 2.12662941, - -9.32562038, - 2.35652522, - ], # cummulative prob of 5 highest values <= 0.6 - [ - 0.58425518, - 4.53139238, - -5.57510464, - -6.28030699, - -7.19529503, - -4.02122551, - 1.39337037, - -6.06707057, - 1.59480517, - -9.643119, - 0.03907799, - 0.67231762, - -8.88206726, - 6.27115922, # 4th highest value; idx. 13 - 2.28520723, - 4.82767506, - 4.30421368, - 8.8275313, # 2nd highest value; idx. 17 - 5.44029958, # 5th highest value; idx. 18 - -4.4735794, - 7.38579536, # 3rd highest value; idx. 20 - -2.91051663, - 2.61946077, - -2.5674762, - -9.48959302, - -4.02922645, - -1.35416918, - 9.67702323, # 1st highest value; idx. 27 - -5.89478553, - 1.85370467, - ], # cummulative prob of 5 highest values <= 0.6 - ], - dtype=tf.float32, - ) - - non_inf_expected_idx = tf.convert_to_tensor( - [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]], - dtype=tf.int32, - ) # expected non filtered idx as noted above - - non_inf_expected_output = tf.convert_to_tensor( - [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023], - dtype=tf.float32, - ) # expected non filtered values as noted above - - output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4) - - non_inf_output = output[output != -float("inf")] - non_inf_idx = tf.cast( - tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))), - dtype=tf.int32, - ) - - tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12) - tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx) - def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() @@ -2179,46 +2083,6 @@ def test_checkpoint_sharding_local(self): for p1, p2 in zip(model.weights, new_model.weights): self.assertTrue(np.allclose(p1.numpy(), p2.numpy())) - def test_generate_tf_function_export(self): - test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2") - max_length = 2 - - class DummyModel(tf.Module): - def __init__(self, model): - super(DummyModel, self).__init__() - self.model = model - - @tf.function( - input_signature=( - tf.TensorSpec((None, max_length), tf.int32, name="input_ids"), - tf.TensorSpec((None, max_length), tf.int32, name="attention_mask"), - ), - jit_compile=True, - ) - def serving(self, input_ids, attention_mask): - outputs = self.model.generate( - input_ids=input_ids, - attention_mask=attention_mask, - max_new_tokens=max_length, - return_dict_in_generate=True, - ) - return {"sequences": outputs["sequences"]} - - dummy_input_ids = [[2, 0], [102, 103]] - dummy_attention_masks = [[1, 0], [1, 1]] - dummy_model = DummyModel(model=test_model) - with tempfile.TemporaryDirectory() as tmp_dir: - tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving}) - serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"] - for batch_size in range(1, len(dummy_input_ids) + 1): - inputs = { - "input_ids": tf.constant(dummy_input_ids[:batch_size]), - "attention_mask": tf.constant(dummy_attention_masks[:batch_size]), - } - tf_func_outputs = serving_func(**inputs)["sequences"] - tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_length) - tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs) - @require_tf @is_staging_test From 4e29b3f88410a2cbb996879bc1081bcc0c6b0817 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Fri, 2 Sep 2022 17:59:26 +0200 Subject: [PATCH 208/539] A script to download artifacts and perform CI error statistics (#18865) Co-authored-by: ydshieh --- utils/get_ci_error_statistics.py | 154 +++++++++++++++++++++++++++++++ 1 file changed, 154 insertions(+) create mode 100644 utils/get_ci_error_statistics.py diff --git a/utils/get_ci_error_statistics.py b/utils/get_ci_error_statistics.py new file mode 100644 index 00000000000000..bca425663b9d06 --- /dev/null +++ b/utils/get_ci_error_statistics.py @@ -0,0 +1,154 @@ +import argparse +import json +import math +import os +import subprocess +import time +import zipfile +from collections import Counter + +import requests + + +def get_artifacts_links(worflow_run_id): + """Get all artifact links from a workflow run""" + + url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100" + result = requests.get(url).json() + artifacts = {} + + try: + artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]}) + pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100) + + for i in range(pages_to_iterate_over): + result = requests.get(url + f"&page={i + 2}").json() + artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]}) + + return artifacts + except Exception as e: + print("Unknown error, could not fetch links.", e) + + return {} + + +def download_artifact(artifact_name, artifact_url, output_dir, token): + """Download a GitHub Action artifact from a URL. + + The URL is of the from `https://api.github.com/repos/huggingface/transformers/actions/artifacts/{ARTIFACT_ID}/zip`, + but it can't be used to download directly. We need to get a redirect URL first. + See https://docs.github.com/en/rest/actions/artifacts#download-an-artifact + """ + # Get the redirect URL first + cmd = f'curl -v -H "Accept: application/vnd.github+json" -H "Authorization: token {token}" {artifact_url}' + output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + o = output.stdout.decode("utf-8") + lines = o.splitlines() + + for line in lines: + if line.startswith("< Location: "): + redirect_url = line[len("< Location: ") :] + r = requests.get(redirect_url, allow_redirects=True) + p = os.path.join(output_dir, f"{artifact_name}.zip") + open(p, "wb").write(r.content) + break + + +def get_errors_from_single_artifact(artifact_zip_path): + """Extract errors from a downloaded artifact (in .zip format)""" + errors = [] + failed_tests = [] + + with zipfile.ZipFile(artifact_zip_path) as z: + for filename in z.namelist(): + if not os.path.isdir(filename): + # read the file + if filename in ["failures_line.txt", "summary_short.txt"]: + with z.open(filename) as f: + for line in f: + line = line.decode("UTF-8").strip() + if filename == "failures_line.txt": + try: + # `error_line` is the place where `error` occurs + error_line = line[: line.index(": ")] + error = line[line.index(": ") + len(": ") :] + errors.append([error_line, error]) + except Exception: + # skip un-related lines + pass + elif filename == "summary_short.txt" and line.startswith("FAILED "): + # `test` is the test method that failed + test = line[len("FAILED ") :] + failed_tests.append(test) + + if len(errors) != len(failed_tests): + raise ValueError( + f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` " + f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some" + " problem." + ) + + return errors, failed_tests + + +def get_all_errors(artifact_dir): + """Extract errors from all artifact files""" + + errors = [] + failed_tests = [] + + paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")] + + for p in paths: + _errors, _failed_tests = get_errors_from_single_artifact(p) + errors.extend(_errors) + failed_tests.extend(_failed_tests) + + return errors, failed_tests + + +if __name__ == "__main__": + + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--workflow_run_id", default=None, type=str, required=True, help="A GitHub Actions workflow run id." + ) + parser.add_argument( + "--output_dir", + default=None, + type=str, + required=True, + help="Where to store the downloaded artifacts and other result files.", + ) + parser.add_argument( + "--token", default=None, type=str, required=True, help="A token that has actions:read permission." + ) + args = parser.parse_args() + + os.makedirs(args.output_dir, exist_ok=True) + + artifacts = get_artifacts_links(args.workflow_run_id) + with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp: + json.dump(artifacts, fp, ensure_ascii=False, indent=4) + + for idx, (name, url) in enumerate(artifacts.items()): + download_artifact(name, url, args.output_dir, args.token) + # Be gentle to GitHub + time.sleep(1) + + errors, failed_tests = get_all_errors(args.output_dir) + + counter = Counter() + counter.update([e[1] for e in errors]) + + # print the top 30 most common test errors + most_common = counter.most_common(30) + for item in most_common: + print(item) + + with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp: + json.dump(errors, fp, ensure_ascii=False, indent=4) + + with open(os.path.join(args.output_dir, "failed_tests.json"), "w", encoding="UTF-8") as fp: + json.dump(failed_tests, fp, ensure_ascii=False, indent=4) From ecdf9b06bc03af272ceb8d6951e30e677fdfd35c Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Fri, 2 Sep 2022 18:17:58 +0200 Subject: [PATCH 209/539] Remove cached torch_extensions on CI runners (#18868) Co-authored-by: ydshieh --- .github/workflows/self-nightly-scheduled.yml | 5 ++++- .github/workflows/self-push.yml | 10 ++++++++-- .github/workflows/self-scheduled.yml | 5 ++++- .../Dockerfile | 2 +- .../Dockerfile | 2 +- 5 files changed, 18 insertions(+), 6 deletions(-) diff --git a/.github/workflows/self-nightly-scheduled.yml b/.github/workflows/self-nightly-scheduled.yml index 78b14463b31a9a..1cdbd6982b71f7 100644 --- a/.github/workflows/self-nightly-scheduled.yml +++ b/.github/workflows/self-nightly-scheduled.yml @@ -179,6 +179,9 @@ jobs: working-directory: /workspace/transformers run: git fetch && git checkout ${{ github.sha }} + - name: Remove cached torch extensions + run: rm -rf /github/home/.cache/torch_extensions/ + # To avoid unknown test failures - name: Pre build DeepSpeed *again* working-directory: /workspace @@ -186,7 +189,7 @@ jobs: python3 -m pip uninstall -y deepspeed rm -rf DeepSpeed git clone https://github.com/microsoft/DeepSpeed && cd DeepSpeed && rm -rf build - DS_BUILD_CPU_ADAM=1 DS_BUILD_AIO=1 DS_BUILD_UTILS=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check + DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_AIO=1 DS_BUILD_UTILS=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check - name: NVIDIA-SMI run: | diff --git a/.github/workflows/self-push.yml b/.github/workflows/self-push.yml index dd1222609d811c..f2f88ad1b9a849 100644 --- a/.github/workflows/self-push.yml +++ b/.github/workflows/self-push.yml @@ -343,12 +343,15 @@ jobs: git checkout ${{ env.CI_SHA }} echo "log = $(git log -n 1)" + - name: Remove cached torch extensions + run: rm -rf /github/home/.cache/torch_extensions/ + # To avoid unknown test failures - name: Pre build DeepSpeed *again* working-directory: /workspace run: | python3 -m pip uninstall -y deepspeed - DS_BUILD_CPU_ADAM=1 DS_BUILD_AIO=1 DS_BUILD_UTILS=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check + DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_AIO=1 DS_BUILD_UTILS=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check - name: NVIDIA-SMI run: | @@ -422,12 +425,15 @@ jobs: git checkout ${{ env.CI_SHA }} echo "log = $(git log -n 1)" + - name: Remove cached torch extensions + run: rm -rf /github/home/.cache/torch_extensions/ + # To avoid unknown test failures - name: Pre build DeepSpeed *again* working-directory: /workspace run: | python3 -m pip uninstall -y deepspeed - DS_BUILD_CPU_ADAM=1 DS_BUILD_AIO=1 DS_BUILD_UTILS=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check + DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_AIO=1 DS_BUILD_UTILS=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check - name: NVIDIA-SMI run: | diff --git a/.github/workflows/self-scheduled.yml b/.github/workflows/self-scheduled.yml index 4d8feba32a54cc..34380faa4eca1f 100644 --- a/.github/workflows/self-scheduled.yml +++ b/.github/workflows/self-scheduled.yml @@ -306,12 +306,15 @@ jobs: working-directory: /workspace/transformers run: git fetch && git checkout ${{ github.sha }} + - name: Remove cached torch extensions + run: rm -rf /github/home/.cache/torch_extensions/ + # To avoid unknown test failures - name: Pre build DeepSpeed *again* working-directory: /workspace run: | python3 -m pip uninstall -y deepspeed - DS_BUILD_CPU_ADAM=1 DS_BUILD_AIO=1 DS_BUILD_UTILS=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check + DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_AIO=1 DS_BUILD_UTILS=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check - name: NVIDIA-SMI run: | diff --git a/docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile b/docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile index 2b3292f350d71d..f5e29175b41d3f 100644 --- a/docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile +++ b/docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile @@ -26,7 +26,7 @@ RUN python3 -m pip uninstall -y deepspeed # This has to be run (again) inside the GPU VMs running the tests. # The installation works here, but some tests fail, if we don't pre-build deepspeed again in the VMs running the tests. # TODO: Find out why test fail. -RUN DS_BUILD_CPU_ADAM=1 DS_BUILD_AIO=1 DS_BUILD_UTILS=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1 +RUN DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_AIO=1 DS_BUILD_UTILS=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1 # When installing in editable mode, `transformers` is not recognized as a package. # this line must be added in order for python to be aware of transformers. diff --git a/docker/transformers-pytorch-deepspeed-nightly-gpu/Dockerfile b/docker/transformers-pytorch-deepspeed-nightly-gpu/Dockerfile index 3f880dd95dcfff..1854d9f4b38d48 100644 --- a/docker/transformers-pytorch-deepspeed-nightly-gpu/Dockerfile +++ b/docker/transformers-pytorch-deepspeed-nightly-gpu/Dockerfile @@ -25,7 +25,7 @@ RUN python3 -m pip uninstall -y deepspeed # This has to be run inside the GPU VMs running the tests. (So far, it fails here due to GPU checks during compilation.) # Issue: https://github.com/microsoft/DeepSpeed/issues/2010 # RUN git clone https://github.com/microsoft/DeepSpeed && cd DeepSpeed && rm -rf build && \ -# DS_BUILD_CPU_ADAM=1 DS_BUILD_AIO=1 DS_BUILD_UTILS=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1 +# DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_AIO=1 DS_BUILD_UTILS=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1 # When installing in editable mode, `transformers` is not recognized as a package. # this line must be added in order for python to be aware of transformers. From 53e33e6f1be980353e8cd3f75c7cf1e58720f557 Mon Sep 17 00:00:00 2001 From: Jason Phang Date: Fri, 2 Sep 2022 13:54:02 -0400 Subject: [PATCH 210/539] PEGASUS-X (#18551) * PegasusX Initial commit * rename * pegasus X implementation * pegx update * pegx fix * pegasus-x fixes * pegx updates * cleanup * cleanup * cleanup * tests * stylefixes * Documentation update * Model hub fix * cleanup * update * update * testfix * Check fix * tweaks for merging * style * style * updates for pr * style * change pegasus-x repo --- README.md | 1 + README_ko.md | 1 + README_zh-hans.md | 1 + README_zh-hant.md | 1 + docs/source/en/_toctree.yml | 2 + docs/source/en/index.mdx | 2 + docs/source/en/model_doc/pegasus_x.mdx | 45 + src/transformers/__init__.py | 16 + src/transformers/models/__init__.py | 1 + .../models/auto/configuration_auto.py | 3 + src/transformers/models/auto/modeling_auto.py | 3 + src/transformers/models/pegasus_x/__init__.py | 62 + .../pegasus_x/configuration_pegasus_x.py | 183 ++ .../models/pegasus_x/modeling_pegasus_x.py | 1692 +++++++++++++++++ src/transformers/utils/dummy_pt_objects.py | 24 + tests/models/pegasus_x/__init__.py | 0 .../pegasus_x/test_modeling_pegasus_x.py | 852 +++++++++ utils/check_repo.py | 9 + 18 files changed, 2898 insertions(+) create mode 100644 docs/source/en/model_doc/pegasus_x.mdx create mode 100644 src/transformers/models/pegasus_x/__init__.py create mode 100644 src/transformers/models/pegasus_x/configuration_pegasus_x.py create mode 100755 src/transformers/models/pegasus_x/modeling_pegasus_x.py create mode 100644 tests/models/pegasus_x/__init__.py create mode 100644 tests/models/pegasus_x/test_modeling_pegasus_x.py diff --git a/README.md b/README.md index 5f89bacf6415d2..2b728c212b3da2 100644 --- a/README.md +++ b/README.md @@ -339,6 +339,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al. 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu. +1. **[PEGASUS-X](https://huggingface.co/docs/transformers/main/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, and Peter J. Liu. 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira. 1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen. 1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang. diff --git a/README_ko.md b/README_ko.md index cc0b790ad76a8d..a0df54a86c1a9b 100644 --- a/README_ko.md +++ b/README_ko.md @@ -291,6 +291,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al. 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu. +1. **[PEGASUS-X](https://huggingface.co/docs/transformers/main/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, Peter J. Liu. 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira. 1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen. 1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang. diff --git a/README_zh-hans.md b/README_zh-hans.md index fe2fa45f71f39f..1141748161510d 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -315,6 +315,7 @@ conda install -c huggingface transformers 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (来自 Meta AI) 伴随论文 [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) 由 Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al 发布。 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (来自 Google AI) 伴随论文 [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) 由 Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby 发布。 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (来自 Google) 伴随论文 [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) 由 Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu 发布。 +1. **[PEGASUS-X](https://huggingface.co/docs/transformers/main/model_doc/pegasus_x)** (来自 Google) 伴随论文 [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) 由 Jason Phang, Yao Zhao, Peter J. Liu 发布。 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (来自 Deepmind) 伴随论文 [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) 由 Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira 发布。 1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (来自 VinAI Research) 伴随论文 [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) 由 Dat Quoc Nguyen and Anh Tuan Nguyen 发布。 1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (来自 UCLA NLP) 伴随论文 [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) 由 Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index 4f5a9954761494..c0444b8ba6a1bc 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -327,6 +327,7 @@ conda install -c huggingface transformers 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al. 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu. +1. **[PEGASUS-X](https://huggingface.co/docs/transformers/main/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, Peter J. Liu. 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira. 1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen. 1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang. diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 57b53c8aa88297..afb501b96e3058 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -297,6 +297,8 @@ title: OPT - local: model_doc/pegasus title: Pegasus + - local: model_doc/pegasus_x + title: PEGASUS-X - local: model_doc/phobert title: PhoBERT - local: model_doc/plbart diff --git a/docs/source/en/index.mdx b/docs/source/en/index.mdx index 82053b11effdda..a9bd25cffb222a 100644 --- a/docs/source/en/index.mdx +++ b/docs/source/en/index.mdx @@ -133,6 +133,7 @@ The library currently contains JAX, PyTorch and TensorFlow implementations, pret 1. **[OPT](master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al. 1. **[OWL-ViT](model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. 1. **[Pegasus](model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu. +1. **[PEGASUS-X](model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, and Peter J. Liu. 1. **[Perceiver IO](model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira. 1. **[PhoBERT](model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen. 1. **[PLBart](model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang. @@ -270,6 +271,7 @@ Flax), PyTorch, and/or TensorFlow. | OPT | ❌ | ❌ | ✅ | ✅ | ✅ | | OWL-ViT | ❌ | ❌ | ✅ | ❌ | ❌ | | Pegasus | ✅ | ✅ | ✅ | ✅ | ✅ | +| PEGASUS-X | ❌ | ❌ | ✅ | ❌ | ❌ | | Perceiver | ✅ | ❌ | ✅ | ❌ | ❌ | | PLBart | ✅ | ❌ | ✅ | ❌ | ❌ | | PoolFormer | ❌ | ❌ | ✅ | ❌ | ❌ | diff --git a/docs/source/en/model_doc/pegasus_x.mdx b/docs/source/en/model_doc/pegasus_x.mdx new file mode 100644 index 00000000000000..c3527c9e01a615 --- /dev/null +++ b/docs/source/en/model_doc/pegasus_x.mdx @@ -0,0 +1,45 @@ + + +# PEGASUS-X + +## Overview + +The PEGASUS-X model was proposed in [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao and Peter J. Liu. + +PEGASUS-X (PEGASUS eXtended) extends the PEGASUS models for long input summarization through additional long input pretraining and using staggered block-local attention with global tokens in the encoder. + +The abstract from the paper is the following: + +*While large pretrained Transformer models have proven highly capable at tackling natural language tasks, handling long sequence inputs continues to be a significant challenge. One such task is long input summarization, where inputs are longer than the maximum input context of most pretrained models. Through an extensive set of experiments, we investigate what model architectural changes and pretraining paradigms can most efficiently adapt a pretrained Transformer for long input summarization. We find that a staggered, block-local Transformer with global encoder tokens strikes a good balance of performance and efficiency, and that an additional pretraining phase on long sequences meaningfully improves downstream summarization performance. Based on our findings, we introduce PEGASUS-X, an extension of the PEGASUS model with additional long input pretraining to handle inputs of up to 16K tokens. PEGASUS-X achieves strong performance on long input summarization tasks comparable with much larger models while adding few additional parameters and not requiring model parallelism to train.* + +Tips: + +* PEGASUS-X uses the same tokenizer as PEGASUS. + +This model was contributed by [zphang](>> from transformers import PegasusXModel, PegasusXConfig + + >>> # Initializing a PEGASUS google/pegasus-x-large style configuration + >>> configuration = PegasusXConfig() + + >>> # Initializing a model from the google/pegasus-x-large style configuration + >>> model = PegasusXModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + model_type = "pegasus_x" + keys_to_ignore_at_inference = ["past_key_values"] + attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} + + def __init__( + self, + vocab_size=96103, + max_position_embeddings=16384, + encoder_layers=16, + encoder_ffn_dim=4096, + encoder_attention_heads=16, + decoder_layers=16, + decoder_ffn_dim=4096, + decoder_attention_heads=16, + encoder_layerdrop=0.0, + decoder_layerdrop=0.0, + use_cache=True, + is_encoder_decoder=True, + activation_function="gelu", + d_model=1024, + dropout=0.1, + attention_dropout=0.0, + activation_dropout=0.0, + init_std=0.02, + decoder_start_token_id=0, + classifier_dropout=0.0, + scale_embedding=True, + pad_token_id=0, + eos_token_id=1, + forced_eos_token_id=1, + num_global_tokens=32, + block_size=512, + stagger_local_blocks=True, + **kwargs + ): + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.d_model = d_model + self.encoder_ffn_dim = encoder_ffn_dim + self.encoder_layers = encoder_layers + self.encoder_attention_heads = encoder_attention_heads + self.decoder_ffn_dim = decoder_ffn_dim + self.decoder_layers = decoder_layers + self.decoder_attention_heads = decoder_attention_heads + self.dropout = dropout + self.attention_dropout = attention_dropout + self.activation_dropout = activation_dropout + self.activation_function = activation_function + self.init_std = init_std + self.encoder_layerdrop = encoder_layerdrop + self.decoder_layerdrop = decoder_layerdrop + self.classifier_dropout = classifier_dropout + self.use_cache = use_cache + self.num_hidden_layers = encoder_layers + self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True + + self.num_global_tokens = num_global_tokens + self.block_size = block_size + self.stagger_local_blocks = stagger_local_blocks + + super().__init__( + pad_token_id=pad_token_id, + eos_token_id=eos_token_id, + is_encoder_decoder=is_encoder_decoder, + decoder_start_token_id=decoder_start_token_id, + forced_eos_token_id=forced_eos_token_id, + **kwargs, + ) + + @property + def num_attention_heads(self) -> int: + return self.encoder_attention_heads + + @property + def hidden_size(self) -> int: + return self.d_model diff --git a/src/transformers/models/pegasus_x/modeling_pegasus_x.py b/src/transformers/models/pegasus_x/modeling_pegasus_x.py new file mode 100755 index 00000000000000..8feac435715e45 --- /dev/null +++ b/src/transformers/models/pegasus_x/modeling_pegasus_x.py @@ -0,0 +1,1692 @@ +# coding=utf-8 +# Copyright 2022, Google and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch PEGASUS-X model.""" + +import dataclasses +import math +import random +from typing import Optional, Tuple, Union + +import numpy as np +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import CrossEntropyLoss + +from ...activations import ACT2FN +from ...modeling_outputs import ( + BaseModelOutput, + BaseModelOutputWithPastAndCrossAttentions, + Seq2SeqLMOutput, + Seq2SeqModelOutput, +) +from ...modeling_utils import PreTrainedModel +from ...utils import ( + add_end_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_pegasus_x import PegasusXConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "google/pegasus-x-base" +_CONFIG_FOR_DOC = "PegasusXConfig" +_TOKENIZER_FOR_DOC = "PegasusTokenizer" + + +PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "google/pegasus-x-base", + "google/pegasus-x-large", + # See all PEGASUS models at https://huggingface.co/models?filter=pegasus-x +] + + +@dataclasses.dataclass +class DimensionInfo: + """Wrapper for dimension info.""" + + batch_size: int # batch size + seq_len: int # token length + block_size: int # block size + num_heads: int # num heads + hidden_dim: int # hidden dim + dim_per_head: int # dim per head + num_blocks: int # num blocks + global_len: int # global length + padded_seq_len: int # padded token seq length + + # Note: Compared to the original Flax implementation, we will pad the token representations to + # a multiple of block size at the start of the encoder layers, so T=P always. + + +# Copied from transformers.models.bart.modeling_bart.shift_tokens_right +def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): + """ + Shift input ids one token to the right. + """ + shifted_input_ids = input_ids.new_zeros(input_ids.shape) + shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() + shifted_input_ids[:, 0] = decoder_start_token_id + + if pad_token_id is None: + raise ValueError("self.model.config.pad_token_id has to be defined.") + # replace possible -100 values in labels by `pad_token_id` + shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) + + return shifted_input_ids + + +# Copied from transformers.models.bart.modeling_bart._make_causal_mask +def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0): + """ + Make causal mask used for bi-directional self-attention. + """ + bsz, tgt_len = input_ids_shape + mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min)) + mask_cond = torch.arange(mask.size(-1)) + mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) + mask = mask.to(dtype) + + if past_key_values_length > 0: + mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1) + return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) + + +# Copied from transformers.models.bart.modeling_bart._expand_mask +def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. + """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + + +class PegasusXSinusoidalPositionalEmbedding(nn.Module): + """This module produces sinusoidal positional embeddings of any length.""" + + def __init__(self, embed_dim, max_scale: int = 10000.0): + super().__init__() + self.embed_dim = embed_dim + self.max_scale = max_scale + + @torch.no_grad() + def forward(self, input_embeds: torch.Tensor, past_key_values_length: int = 0) -> torch.Tensor: + """`input_ids_shape` is expected to be [bsz x seqlen].""" + batch_size, seq_len = input_embeds.shape[:2] + positions = torch.arange( + past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=input_embeds.device + )[:, None] + pe = torch.zeros((seq_len, self.embed_dim), device=input_embeds.device, dtype=input_embeds.dtype) + half_d_feature = self.embed_dim // 2 + div_term = torch.exp( + torch.arange(half_d_feature, device=input_embeds.device, dtype=input_embeds.dtype) + * -(np.log(float(self.max_scale)) / (half_d_feature - 1)) + ) + pe[:, :half_d_feature] = torch.sin(positions * div_term) + pe[:, half_d_feature:] = torch.cos(positions * div_term) + return pe[None].expand(batch_size, -1, -1) + + +# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->PegasusX +class PegasusXAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + is_decoder: bool = False, + bias: bool = True, + ): + super().__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + + if (self.head_dim * num_heads) != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" + f" and `num_heads`: {num_heads})." + ) + self.scaling = self.head_dim**-0.5 + self.is_decoder = is_decoder + + self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + key_value_states: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + + bsz, tgt_len, _ = hidden_states.size() + + # get query proj + query_states = self.q_proj(hidden_states) * self.scaling + # get key, value proj + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_states = past_key_value[0] + value_states = past_key_value[1] + elif is_cross_attention: + # cross_attentions + key_states = self._shape(self.k_proj(key_value_states), -1, bsz) + value_states = self._shape(self.v_proj(key_value_states), -1, bsz) + elif past_key_value is not None: + # reuse k, v, self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + else: + # self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states, value_states) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) + key_states = key_states.view(*proj_shape) + value_states = value_states.view(*proj_shape) + + src_len = key_states.size(1) + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if layer_head_mask is not None: + if layer_head_mask.size() != (self.num_heads,): + raise ValueError( + f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" + f" {layer_head_mask.size()}" + ) + attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + if output_attentions: + # this operation is a bit awkward, but it's required to + # make sure that attn_weights keeps its gradient. + # In order to do so, attn_weights have to be reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + + # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be + # partitioned aross GPUs when using tensor-parallelism. + attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped, past_key_value + + +class PegasusXGlobalLocalAttention(nn.Module): + """Global + Local attention. For use with Encoder only.""" + + def __init__( + self, + embed_dim: int, + num_heads: int, + block_size: int, + dropout: float = 0.0, + is_decoder: bool = False, + ): + super().__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.block_size = block_size + self.dropout = dropout + self.head_dim = embed_dim // num_heads + + if (self.head_dim * num_heads) != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" + f" and `num_heads`: {num_heads})." + ) + self.scaling = self.head_dim**-0.5 + self.is_decoder = is_decoder + + self.k_proj = nn.Linear(embed_dim, embed_dim, bias=False) + self.v_proj = nn.Linear(embed_dim, embed_dim, bias=False) + self.q_proj = nn.Linear(embed_dim, embed_dim, bias=False) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=False) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + token_hidden_states: torch.Tensor, + global_hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]: + """Input shape: Batch x Time x Channel""" + dim = DimensionInfo( + batch_size=token_hidden_states.shape[0], + seq_len=token_hidden_states.shape[1], + block_size=self.block_size, + num_heads=self.num_heads, + hidden_dim=token_hidden_states.shape[2], + dim_per_head=self.head_dim, + num_blocks=token_hidden_states.shape[1] // self.block_size, + global_len=global_hidden_states.shape[1], + padded_seq_len=token_hidden_states.shape[1], + ) + + # [batch_size, num_heads, padded_seq_len, dim_per_head] + local_q = self._shape( + self.q_proj(token_hidden_states) * self.scaling, + seq_len=dim.padded_seq_len, + bsz=dim.batch_size, + ) + local_k = self._shape( + self.k_proj(token_hidden_states), + seq_len=dim.padded_seq_len, + bsz=dim.batch_size, + ) + local_v = self._shape( + self.v_proj(token_hidden_states), + seq_len=dim.padded_seq_len, + bsz=dim.batch_size, + ) + + # [batch_size, num_heads, global_len, dim_per_head] + global_q = self._shape( + self.q_proj(global_hidden_states) * self.scaling, + seq_len=dim.global_len, + bsz=dim.batch_size, + ) + global_k = self._shape( + self.k_proj(global_hidden_states), + seq_len=dim.global_len, + bsz=dim.batch_size, + ) + global_v = self._shape( + self.v_proj(global_hidden_states), + seq_len=dim.global_len, + bsz=dim.batch_size, + ) + + global_attn_output, global_attn_probs = self.compute_global_attention_representations( + global_q=global_q, + global_k=global_k, + global_v=global_v, + local_k=local_k, + local_v=local_v, + mask=attention_mask, + dim=dim, + ) + local_attn_output, local_attn_probs = self.compute_local_attention_representations( + global_k=global_k, + global_v=global_v, + local_q=local_q, + local_k=local_k, + local_v=local_v, + mask=attention_mask, + dim=dim, + ) + + # [batch_size, global_len, hidden_dim] + global_attn_output = ( + global_attn_output.transpose(1, 2).contiguous().view(dim.batch_size, dim.global_len, dim.hidden_dim) + ) + # [batch_size, global_len, hidden_dim] + global_attn_output = self.out_proj(global_attn_output) + # [batch_size, num_heads, block_size, num_heads, dim_per_head] + local_attn_output = local_attn_output.permute(0, 2, 3, 1, 4).contiguous() + # [batch_size, padded_seq_len, hidden_dim] + local_attn_output = local_attn_output.view(dim.batch_size, dim.padded_seq_len, dim.hidden_dim) + # [batch_size, padded_seq_len, hidden_dim] + local_attn_output = self.out_proj(local_attn_output) + + if output_attentions: + attn_probs = {"global": global_attn_probs, "local": local_attn_probs} + else: + attn_probs = None + + return local_attn_output, global_attn_output, attn_probs + + def compute_global_attention_representations( + self, global_q, global_k, global_v, local_k, local_v, mask, dim: DimensionInfo + ): + """Compute attention representations for global tokens. + + Global tokens will attend to both global tokens as well as all input sequence tokens. Because the input + sequence tokens are arranged in blocks for local attention, we unblock them and compute attention. + + Args: + global_q (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]: + query vectors from global tokens + global_k (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]: + key vectors from global tokens + global_v (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]: + value vectors from global tokens + local_k (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]: + key vectors from local tokens + local_v (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]: + value vectors from local tokens + mask (`torch.FloatTensor`) of shape [batch_size, padded_seq_len]: attention mask + dim (DimensionInfo): DimensionInfo wrapper for dimensions + + Returns: + output of shape `[batch_sizes, length, features]`. where length will be padded to a multiple of block_size + """ + # [batch_size, num_heads, global_len+padded_seq_len, dim_per_head] + global_and_local_k = torch.cat([global_k, local_k], dim=2) + # [batch_size, num_heads, global_len+padded_seq_len, dim_per_head] + global_and_local_v = torch.cat([global_v, local_v], dim=2) + + # [batch_size, global_len+padded_seq_len] + extended_mask = nn.functional.pad(mask, pad=(dim.global_len, 0), value=0) + + # [batch_size, num_heads, global_len, global_len+padded_seq_len] + attn_weights = torch.einsum("BHGF,BHXF->BHGX", global_q, global_and_local_k) + attn_weights = attn_weights + extended_mask[:, None, None, :] + attn_probs = nn.functional.softmax(attn_weights, dim=-1) + attn_probs = nn.functional.dropout(attn_probs, p=self.dropout, training=self.training) + + # [batch_size, num_heads, global_len, F] + attn_output = torch.einsum("BHGX,BHXF->BHGF", attn_probs, global_and_local_v) + return attn_output, attn_probs + + def compute_local_attention_representations( + self, global_k, global_v, local_q, local_k, local_v, mask, dim: DimensionInfo + ): + """Compute attention representations for local tokens. + + Local tokens will attend to both global tokens as well as all other tokens within the same local block. Hence, + we need to tile and concatenate the global tokens to every local block + + Args: + global_k (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]: + key vectors from global tokens + global_v (`torch.FloatTensor`) of shape [batch_size, num_heads, global_len, dim_per_head]: + value vectors from global tokens + local_q (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]: + query vectors from local tokens + local_k (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]: + key vectors from local tokens + local_v (`torch.FloatTensor`) of shape [batch_size, num_heads, padded_seq_len, dim_per_head]: + value vectors from local tokens + mask (`torch.FloatTensor`) of shape [batch_size, padded_seq_len]: attention mask + dim (DimensionInfo): DimensionInfo wrapper for dimensions + + Returns: + output of shape `[batch_sizes, length, features]`. where length will be padded to a multiple of block_size + """ + # [batch_size, num_heads, num_blocks, block_size, dim_per_head] + blocked_local_q = local_q.view(dim.batch_size, dim.num_heads, dim.num_blocks, dim.block_size, dim.dim_per_head) + # [batch_size, num_heads, num_blocks, block_size, dim_per_head] + blocked_local_k = local_k.view(dim.batch_size, dim.num_heads, dim.num_blocks, dim.block_size, dim.dim_per_head) + # [batch_size, num_heads, num_blocks, block_size, dim_per_head] + blocked_local_v = local_v.view(dim.batch_size, dim.num_heads, dim.num_blocks, dim.block_size, dim.dim_per_head) + + # [batch_size, num_blocks, global_len+block_size] + extended_mask = nn.functional.pad( + mask.view(dim.batch_size, dim.num_blocks, dim.block_size), + pad=(dim.global_len, 0), + value=0, + ) + + # [batch_size, num_heads, num_blocks, block_size, global_len] + blocked_local2global = torch.einsum("BHNKF,BHGF->BHNKG", blocked_local_q, global_k) + # [batch_size, num_heads, num_blocks, block_size, block_size] + blocked_local2local = torch.einsum("BHNKF,BHNXF->BHNKX", blocked_local_q, blocked_local_k) + + # [batch_size, num_heads, num_blocks, block_size, global_len+block_size] + attn_weights = torch.cat([blocked_local2global, blocked_local2local], dim=-1) + attn_weights = attn_weights + extended_mask[:, None, :, None, :] + attn_probs = nn.functional.softmax(attn_weights, dim=-1) + attn_probs = nn.functional.dropout(attn_probs, p=self.dropout, training=self.training) + + # [batch_size, num_heads, num_blocks, block_size, global_len] + local2global_attn_probs = attn_probs[:, :, :, :, : dim.global_len] + # [batch_size, num_heads, num_blocks, block_size, block_size] + local2local_attn_probs = attn_probs[:, :, :, :, dim.global_len :] + + # [batch_size, num_heads, num_blocks, block_size, dim_per_head] + local2global_attn_output = torch.einsum("BHNKG,BHGF->BHNKF", local2global_attn_probs, global_v) + # [batch_size, num_heads, num_blocks, block_size, dim_per_head] + local2local_attn_output = torch.einsum("BHNKX,BHNXF->BHNKF", local2local_attn_probs, blocked_local_v) + # [batch_size, num_heads, num_blocks, block_size, dim_per_head] + attn_output = local2global_attn_output + local2local_attn_output + return attn_output, attn_probs + + +class PegasusXEncoderLayer(nn.Module): + def __init__(self, stagger_blocks_this_layer: bool, config: PegasusXConfig): + super().__init__() + self.embed_dim = config.d_model + self.self_attn = PegasusXGlobalLocalAttention( + embed_dim=self.embed_dim, + num_heads=config.encoder_attention_heads, + block_size=config.block_size, + dropout=config.attention_dropout, + ) + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.global_self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) + self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + self.stagger_blocks_this_layer = stagger_blocks_this_layer + self.block_size = config.block_size + + def forward( + self, + hidden_states: torch.Tensor, + global_hidden_states: torch.Tensor, + attention_mask: torch.Tensor, + output_attentions: bool = False, + ) -> torch.Tensor: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape *(seq_len, batch, embed_dim)* + global_hidden_states (`torch.FloatTensor`): global token hidden states + *(seq_len, num_global_tokens, embed_dim)* + attention_mask (`torch.FloatTensor`): attention mask of size + *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + """ + residual = hidden_states + global_residual = global_hidden_states + + hidden_states = self.self_attn_layer_norm(hidden_states) + global_hidden_states = self.global_self_attn_layer_norm(global_hidden_states) + + if self.stagger_blocks_this_layer: + # Pad the blocks to simulate staggering + hidden_states, attention_mask = self.pad_local_tokens( + hidden_states=hidden_states, attention_mask=attention_mask, block_size=self.block_size + ) + + hidden_states, global_hidden_states, attn_weights = self.self_attn( + token_hidden_states=hidden_states, + global_hidden_states=global_hidden_states, + attention_mask=attention_mask, + output_attentions=output_attentions, + ) + + if self.stagger_blocks_this_layer: + # Undo the padding + hidden_states = self.unpad_local_tokens(padded_hidden_states=hidden_states, block_size=self.block_size) + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + + global_hidden_states = nn.functional.dropout(global_hidden_states, p=self.dropout, training=self.training) + global_hidden_states = global_residual + global_hidden_states + + residual = hidden_states + hidden_states = self.final_layer_norm(hidden_states) + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + + global_residual = global_hidden_states + global_hidden_states = self.final_layer_norm(global_hidden_states) + global_hidden_states = self.activation_fn(self.fc1(global_hidden_states)) + global_hidden_states = nn.functional.dropout( + global_hidden_states, p=self.activation_dropout, training=self.training + ) + global_hidden_states = self.fc2(global_hidden_states) + global_hidden_states = nn.functional.dropout(global_hidden_states, p=self.dropout, training=self.training) + global_hidden_states = global_residual + global_hidden_states + outputs = (hidden_states, global_hidden_states) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + @classmethod + def pad_local_tokens(cls, hidden_states, attention_mask, block_size): + # hidden_states: [batch_size, seq_len, hidden_dim] + pad_size = block_size // 2 + mask_min_value = torch.finfo(hidden_states.dtype).min + padded_hidden_states = torch.nn.functional.pad( + hidden_states, + pad=(0, 0, pad_size, pad_size), + ) + padded_mask = torch.nn.functional.pad( + attention_mask, + pad=(pad_size, pad_size), + value=mask_min_value, + ) + return padded_hidden_states, padded_mask + + @classmethod + def unpad_local_tokens(cls, padded_hidden_states, block_size): + # padded_hidden_states: [batch_size, padded seq_len, hidden_dim] + pad_size = block_size // 2 + return padded_hidden_states[:, pad_size:-pad_size, :] + + +class PegasusXDecoderLayer(nn.Module): + def __init__(self, config: PegasusXConfig): + super().__init__() + self.embed_dim = config.d_model + + self.self_attn = PegasusXAttention( + embed_dim=self.embed_dim, + num_heads=config.decoder_attention_heads, + dropout=config.attention_dropout, + is_decoder=True, + bias=False, + ) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.encoder_attn = PegasusXAttention( + self.embed_dim, + config.decoder_attention_heads, + dropout=config.attention_dropout, + is_decoder=True, + bias=False, + ) + self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) + self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = True, + ) -> torch.Tensor: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape *(seq_len, batch, embed_dim)* + attention_mask (`torch.FloatTensor`): attention mask of size + *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. + encoder_hidden_states (`torch.FloatTensor`): + cross attention input to the layer of shape *(seq_len, batch, embed_dim)* + encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size + *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values. + past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + use_cache: Whether to us KV cache for decoding + """ + residual = hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + # Self Attention + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + # add present self-attn cache to positions 1,2 of present_key_value tuple + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + past_key_value=self_attn_past_key_value, + attention_mask=attention_mask, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + + # Cross-Attention Block + cross_attn_present_key_value = None + cross_attn_weights = None + if encoder_hidden_states is not None: + residual = hidden_states + hidden_states = self.encoder_attn_layer_norm(hidden_states) + + # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( + hidden_states=hidden_states, + key_value_states=encoder_hidden_states, + attention_mask=encoder_attention_mask, + past_key_value=cross_attn_past_key_value, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + + # add cross-attn to positions 3,4 of present_key_value tuple + present_key_value = present_key_value + cross_attn_present_key_value + + # Fully Connected + residual = hidden_states + hidden_states = self.final_layer_norm(hidden_states) + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights, cross_attn_weights) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +class PegasusXPreTrainedModel(PreTrainedModel): + config_class = PegasusXConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + + def _init_weights(self, module): + std = self.config.init_std + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, (PegasusXDecoder, PegasusXEncoder)): + module.gradient_checkpointing = value + + +PEGASUS_X_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`PegasusXConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +PEGASUS_X_GENERATION_EXAMPLE = r""" + Summarization example: + + ```python + >>> from transformers import PegasusTokenizer, PegasusXForConditionalGeneration + + >>> model = PegasusXForConditionalGeneration.from_pretrained("google/pegasus-x-base") + >>> tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-x-large") + + >>> ARTICLE_TO_SUMMARIZE = ( + ... "PG&E stated it scheduled the blackouts in response to forecasts for high winds " + ... "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were " + ... "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow." + ... ) + >>> inputs = tokenizer(ARTICLE_TO_SUMMARIZE, max_length=1024, return_tensors="pt") + + >>> # Generate Summary + >>> summary_ids = model.generate(inputs["input_ids"]) + >>> tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "California's largest electricity provider has turned off power to hundreds of thousands of customers." + ``` +""" + +PEGASUS_X_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`PegasusTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Indices of decoder input sequence tokens in the vocabulary. + + Indices can be obtained using [`PegasusTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are decoder input IDs?](../glossary#decoder-input-ids) + + PEGASUS-X uses the `pad_token_id` as the starting token for `decoder_input_ids` generation. If + `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also + be used by default. + + encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): + Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) + `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of + hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape + `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you + can choose to directly pass an embedded representation. This is useful if you want more control over how to + convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. + decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded + representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be + input (see `past_key_values`). This is useful if you want more control over how to convert + `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. + + If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value + of `inputs_embeds`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +class PegasusXEncoder(PegasusXPreTrainedModel): + """ + Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a + [`PegasusXEncoderLayer`]. + + Args: + config: PegasusXConfig + embed_tokens (nn.Embedding): output embedding + """ + + def __init__(self, config: PegasusXConfig, embed_tokens: Optional[nn.Embedding] = None): + super().__init__(config) + + self.dropout = config.dropout + self.layerdrop = config.encoder_layerdrop + + embed_dim = config.d_model + self.max_source_positions = config.max_position_embeddings + self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 + + if embed_tokens is not None: + self.embed_tokens = embed_tokens + else: + self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim) + + self.embed_global = nn.Embedding(config.num_global_tokens, embed_dim) + self.embed_positions = PegasusXSinusoidalPositionalEmbedding(embed_dim) + self.layers = nn.ModuleList( + [ + PegasusXEncoderLayer( + stagger_blocks_this_layer=i % 2 == 1 and config.stagger_local_blocks, config=config + ) + for i in range(config.encoder_layers) + ] + ) + self.layer_norm = nn.LayerNorm(config.d_model) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def resize_position_embeddings(self, new_num_position_embeddings: int): + """ + Resizes position embeddings matrix of the model if `new_num_position_embeddings != + config.max_position_embeddings`. + + Arguments: + new_num_position_embeddings (`int`): + The number of new position embeddings. If position embeddings are learned, increasing the size will add + newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If + position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will + add correct vectors at the end following the position encoding algorithm, whereas reducing the size + will remove vectors from the end. + """ + logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...") + self.config.max_position_embeddings = new_num_position_embeddings + + self.embed_positions = PegasusXSinusoidalPositionalEmbedding(self.config.d_model) + self.embed_positions.to(self.device) + + def get_position_embeddings(self) -> nn.Embedding: + """ + Returns the position embeddings matrix + """ + return self.embed_positions + + def forward( + self, + input_ids=None, + attention_mask=None, + inputs_embeds=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`PegasusTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale + + embed_pos = self.embed_positions(inputs_embeds) + + hidden_states = inputs_embeds + embed_pos + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + batch_size, seq_len, _ = hidden_states.shape + + # Setup mask + if attention_mask is None: + attention_mask = torch.ones(*input_shape, dtype=inputs_embeds.dtype, device=inputs_embeds.device) + attention_mask = attention_mask.to(dtype=hidden_states.dtype) + mask_min_value = torch.finfo(hidden_states.dtype).min + inverted_mask = 1.0 - attention_mask + attention_mask = inverted_mask.masked_fill( + inverted_mask.to(torch.bool), + mask_min_value, + ) + + # padding to block_size + if seq_len % self.config.block_size != 0: + pad_len = self.config.block_size - seq_len % self.config.block_size + hidden_states = nn.functional.pad(hidden_states, pad=(0, 0, 0, pad_len), value=0) + attention_mask = nn.functional.pad(attention_mask, pad=(0, pad_len), value=mask_min_value) + + # Global tokens + global_hidden_states = self.embed_global( + torch.arange(self.config.num_global_tokens, device=hidden_states.device)[None].expand(batch_size, -1) + ) + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + dropout_probability = random.uniform(0, 1) + if self.training and (dropout_probability < self.layerdrop): # skip the layer + layer_outputs = (None, None) + else: + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(encoder_layer), + hidden_states, + global_hidden_states, + attention_mask, + ) + else: + layer_outputs = encoder_layer( + hidden_states, + global_hidden_states, + attention_mask, + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + global_hidden_states = layer_outputs[1] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[2],) + + # Undo padding-to-block-size + hidden_states = hidden_states[:, :seq_len] + + hidden_states = self.layer_norm(hidden_states) + + if output_hidden_states: + encoder_states = encoder_states + ((hidden_states, global_hidden_states),) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions + ) + + +class PegasusXDecoder(PegasusXPreTrainedModel): + """ + Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`PegasusDecoderLayer`] + + Args: + config: PegasusXConfig + embed_tokens (nn.Embedding): output embedding + """ + + def __init__(self, config: PegasusXConfig, embed_tokens: Optional[nn.Embedding] = None): + super().__init__(config) + self.dropout = config.dropout + self.layerdrop = config.decoder_layerdrop + self.max_target_positions = config.max_position_embeddings + self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 + + if embed_tokens is not None: + self.embed_tokens = embed_tokens + else: + self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model) + + self.embed_positions = PegasusXSinusoidalPositionalEmbedding(config.d_model) + self.layers = nn.ModuleList([PegasusXDecoderLayer(config) for _ in range(config.decoder_layers)]) + self.layer_norm = nn.LayerNorm(config.d_model) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask + def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + combined_attention_mask = None + if input_shape[-1] > 1: + combined_attention_mask = _make_causal_mask( + input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length + ).to(inputs_embeds.device) + + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( + inputs_embeds.device + ) + combined_attention_mask = ( + expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask + ) + + return combined_attention_mask + + def resize_position_embeddings(self, new_num_position_embeddings: int): + """ + Resizes position embeddings matrix of the model if `new_num_position_embeddings != + config.max_position_embeddings`. + + Arguments: + new_num_position_embeddings (`int`): + The number of new position embeddings. If position embeddings are learned, increasing the size will add + newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If + position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will + add correct vectors at the end following the position encoding algorithm, whereas reducing the size + will remove vectors from the end. + """ + logger.info(f"Setting `config.max_position_embeddings={new_num_position_embeddings}`...") + self.config.max_position_embeddings = new_num_position_embeddings + + self.embed_positions = PegasusXSinusoidalPositionalEmbedding(self.config.d_model) + self.embed_positions.to(self.device) + + def get_position_embeddings(self) -> nn.Embedding: + """ + Returns the position embeddings matrix + """ + return self.embed_positions + + def forward( + self, + input_ids=None, + attention_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + inputs_embeds=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`PegasusTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention + of the decoder. + encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): + Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values + selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of + shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the + cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those + that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of + all `decoder_input_ids` of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of + shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing + `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more + control over how to convert `input_ids` indices into associated vectors than the model's internal + embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale + + attention_mask = self._prepare_decoder_attention_mask( + attention_mask, input_shape, inputs_embeds, past_key_values_length + ) + + # expand encoder attention mask + if encoder_hidden_states is not None and encoder_attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) + + # embed positions + positions = self.embed_positions(inputs_embeds, past_key_values_length) + + hidden_states = inputs_embeds + positions + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None + next_decoder_cache = () if use_cache else None + + for idx, decoder_layer in enumerate(self.layers): + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + if output_hidden_states: + all_hidden_states += (hidden_states,) + dropout_probability = random.uniform(0, 1) + if self.training and (dropout_probability < self.layerdrop): + continue + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + if use_cache: + logger.warning( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + def create_custom_forward(module): + def custom_forward(*inputs): + # None for past_key_value + return module(*inputs, output_attentions, use_cache) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(decoder_layer), + hidden_states, + attention_mask, + encoder_hidden_states, + encoder_attention_mask, + None, + ) + else: + + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + if encoder_hidden_states is not None: + all_cross_attentions += (layer_outputs[2],) + + hidden_states = self.layer_norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + if not return_dict: + return tuple( + v + for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + cross_attentions=all_cross_attentions, + ) + + +@add_start_docstrings( + "The bare PEGASUS-X Model outputting raw hidden-states without any specific head on top.", + PEGASUS_X_START_DOCSTRING, +) +class PegasusXModel(PegasusXPreTrainedModel): + def __init__(self, config: PegasusXConfig): + super().__init__(config) + + vocab_size = config.vocab_size + self.shared = nn.Embedding(vocab_size, config.d_model) + + self.encoder = PegasusXEncoder(config, self.shared) + self.decoder = PegasusXDecoder(config, self.shared) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.shared + + def set_input_embeddings(self, value): + self.shared = value + self.encoder.embed_tokens = self.shared + self.decoder.embed_tokens = self.shared + + def get_encoder(self): + return self.encoder + + def get_decoder(self): + return self.decoder + + def resize_position_embeddings(self, new_num_position_embeddings: int): + """ + Resizes position embeddings matrix of the model if `new_num_position_embeddings != + config.max_position_embeddings`. + + Arguments: + new_num_position_embeddings (`int`): + The number of new position embeddings. If position embeddings are learned, increasing the size will add + newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If + position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will + add correct vectors at the end following the position encoding algorithm, whereas reducing the size + will remove vectors from the end. + """ + self.config.max_position_embeddings = new_num_position_embeddings + self.encoder.resize_position_embeddings(new_num_position_embeddings) + self.decoder.resize_position_embeddings(new_num_position_embeddings) + + def get_position_embeddings(self) -> Tuple[nn.Embedding]: + """ + Returns the position embeddings matrix + """ + return (self.encoder.get_position_embeddings(), self.decoder.get_position_embeddings()) + + @add_start_docstrings_to_model_forward(PEGASUS_X_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + decoder_input_ids: Optional[torch.Tensor] = None, + decoder_attention_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None, + past_key_values: Optional[Tuple[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.Tensor] = None, + decoder_inputs_embeds: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, Seq2SeqModelOutput]: + r""" + Returns: + + Example: + + ```python + >>> from transformers import PegasusTokenizer, PegasusModel + + >>> tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-x-large") + >>> model = PegasusModel.from_pretrained("google/pegasus-x-large") + + >>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt") + >>> decoder_inputs = tokenizer("Studies show that", return_tensors="pt") + >>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_inputs.input_ids) + + >>> last_hidden_states = outputs.last_hidden_state + >>> list(last_hidden_states.shape) + [1, 4, 1024] + ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if encoder_outputs is None: + encoder_outputs = self.encoder( + input_ids=input_ids, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True + elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): + encoder_outputs = BaseModelOutput( + last_hidden_state=encoder_outputs[0], + hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, + attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, + ) + + # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) + decoder_outputs = self.decoder( + input_ids=decoder_input_ids, + attention_mask=decoder_attention_mask, + encoder_hidden_states=encoder_outputs[0], + encoder_attention_mask=attention_mask, + past_key_values=past_key_values, + inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + if not return_dict: + return decoder_outputs + encoder_outputs + + return Seq2SeqModelOutput( + last_hidden_state=decoder_outputs.last_hidden_state, + past_key_values=decoder_outputs.past_key_values, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + ) + + +@add_start_docstrings("The PEGASUS-X for conditional generation (e.g. summarization).", PEGASUS_X_START_DOCSTRING) +class PegasusXForConditionalGeneration(PegasusXPreTrainedModel): + base_model_prefix = "model" + _keys_to_ignore_on_load_missing = [ + r"encoder.version", + r"decoder.version", + r"lm_head.weight", + r"embed_positions.weight", + ] + + def __init__(self, config: PegasusXConfig): + super().__init__(config) + self.model = PegasusXModel(config) + self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_encoder(self): + return self.model.get_encoder() + + def get_decoder(self): + return self.model.get_decoder() + + def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding: + new_embeddings = super().resize_token_embeddings(new_num_tokens) + return new_embeddings + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def resize_position_embeddings(self, new_num_position_embeddings: int): + """ + Resizes position embeddings matrix of the model if `new_num_position_embeddings != + config.max_position_embeddings`. + + Arguments: + new_num_position_embeddings (`int`): + The number of new position embeddings. If position embeddings are learned, increasing the size will add + newly initialized vectors at the end, whereas reducing the size will remove vectors from the end. If + position embeddings are not learned (*e.g.* sinusoidal position embeddings), increasing the size will + add correct vectors at the end following the position encoding algorithm, whereas reducing the size + will remove vectors from the end. + """ + self.config.max_position_embeddings = new_num_position_embeddings + self.model.encoder.resize_position_embeddings(new_num_position_embeddings) + self.model.decoder.resize_position_embeddings(new_num_position_embeddings) + + def get_position_embeddings(self) -> Tuple[nn.Embedding]: + """ + Returns the position embeddings matrix + """ + return (self.model.encoder.get_position_embeddings(), self.model.decoder.get_position_embeddings()) + + @add_start_docstrings_to_model_forward(PEGASUS_X_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) + @add_end_docstrings(PEGASUS_X_GENERATION_EXAMPLE) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + decoder_input_ids: Optional[torch.Tensor] = None, + decoder_attention_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None, + past_key_values: Optional[Tuple[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.Tensor] = None, + decoder_inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, Seq2SeqLMOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if labels is not None: + if use_cache: + logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") + use_cache = False + if decoder_input_ids is None: + decoder_input_ids = shift_tokens_right( + labels, self.config.pad_token_id, self.config.decoder_start_token_id + ) + + outputs = self.model( + input_ids, + attention_mask=attention_mask, + decoder_input_ids=decoder_input_ids, + encoder_outputs=encoder_outputs, + decoder_attention_mask=decoder_attention_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + decoder_inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + lm_logits = self.lm_head(outputs[0]) + + masked_lm_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (lm_logits,) + outputs[1:] + return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output + + return Seq2SeqLMOutput( + loss=masked_lm_loss, + logits=lm_logits, + past_key_values=outputs.past_key_values, + decoder_hidden_states=outputs.decoder_hidden_states, + decoder_attentions=outputs.decoder_attentions, + cross_attentions=outputs.cross_attentions, + encoder_last_hidden_state=outputs.encoder_last_hidden_state, + encoder_hidden_states=outputs.encoder_hidden_states, + encoder_attentions=outputs.encoder_attentions, + ) + + def prepare_inputs_for_generation( + self, decoder_input_ids, past=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs + ): + # cut decoder_input_ids if past is used + if past is not None: + decoder_input_ids = decoder_input_ids[:, -1:] + + return { + "input_ids": None, # encoder_outputs is defined. input_ids not needed + "encoder_outputs": encoder_outputs, + "past_key_values": past, + "decoder_input_ids": decoder_input_ids, + "attention_mask": attention_mask, + "use_cache": use_cache, # change this to avoid caching (presumably for debugging) + } + + def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): + return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) + + @staticmethod + def _reorder_cache(past, beam_idx): + reordered_past = () + for layer_past in past: + # cached cross_attention states don't have to be reordered -> they are always the same + reordered_past += ( + tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:], + ) + return reordered_past + + +# Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->PegasusX +class PegasusXDecoderWrapper(PegasusXPreTrainedModel): + """ + This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is + used in combination with the [`EncoderDecoderModel`] framework. + """ + + def __init__(self, config): + super().__init__(config) + self.decoder = PegasusXDecoder(config) + + def forward(self, *args, **kwargs): + return self.decoder(*args, **kwargs) diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index 9c0db79e44b8df..32ba979f78b62b 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -3587,6 +3587,30 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class PegasusXForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PegasusXModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class PegasusXPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/tests/models/pegasus_x/__init__.py b/tests/models/pegasus_x/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/tests/models/pegasus_x/test_modeling_pegasus_x.py b/tests/models/pegasus_x/test_modeling_pegasus_x.py new file mode 100644 index 00000000000000..17518ebe993ddc --- /dev/null +++ b/tests/models/pegasus_x/test_modeling_pegasus_x.py @@ -0,0 +1,852 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Testing suite for the PyTorch PEGASUS-X model. """ + + +import copy +import math +import tempfile +import unittest + +from transformers import is_torch_available +from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device +from transformers.utils import cached_property + +from ...generation.test_generation_utils import GenerationTesterMixin +from ...test_configuration_common import ConfigTester +from ...test_modeling_common import ModelTesterMixin, ids_tensor + + +if is_torch_available(): + import torch + + from transformers import PegasusTokenizer, PegasusXConfig, PegasusXForConditionalGeneration, PegasusXModel + from transformers.models.pegasus_x.modeling_pegasus_x import PegasusXDecoder, PegasusXEncoder + + +def prepare_pegasus_x_inputs_dict( + config, + input_ids, + decoder_input_ids, + attention_mask=None, + decoder_attention_mask=None, +): + if attention_mask is None: + attention_mask = input_ids.ne(config.pad_token_id) + if decoder_attention_mask is None: + decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) + return { + "input_ids": input_ids, + "decoder_input_ids": decoder_input_ids, + "attention_mask": attention_mask, + "decoder_attention_mask": attention_mask, + } + + +@require_torch +class PegasusXModelTester: + def __init__( + self, + parent, + batch_size=13, + seq_length=7, + is_training=True, + use_labels=False, + vocab_size=99, + hidden_size=16, + num_hidden_layers=2, + num_attention_heads=4, + intermediate_size=4, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=20, + eos_token_id=2, + pad_token_id=1, + bos_token_id=0, + ): + self.parent = parent + self.batch_size = batch_size + self.seq_length = seq_length + self.is_training = is_training + self.use_labels = use_labels + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.eos_token_id = eos_token_id + self.pad_token_id = pad_token_id + self.bos_token_id = bos_token_id + + def prepare_config_and_inputs(self): + input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp( + 3, + ) + input_ids[:, -1] = self.eos_token_id # Eos Token + + decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) + + config = PegasusXConfig( + vocab_size=self.vocab_size, + d_model=self.hidden_size, + encoder_layers=self.num_hidden_layers, + decoder_layers=self.num_hidden_layers, + encoder_attention_heads=self.num_attention_heads, + decoder_attention_heads=self.num_attention_heads, + encoder_ffn_dim=self.intermediate_size, + decoder_ffn_dim=self.intermediate_size, + dropout=self.hidden_dropout_prob, + attention_dropout=self.attention_probs_dropout_prob, + max_position_embeddings=self.max_position_embeddings, + eos_token_id=self.eos_token_id, + bos_token_id=self.bos_token_id, + pad_token_id=self.pad_token_id, + stagger_local_blocks=False, + ) + inputs_dict = prepare_pegasus_x_inputs_dict(config, input_ids, decoder_input_ids) + return config, inputs_dict + + def prepare_config_and_inputs_for_common(self): + config, inputs_dict = self.prepare_config_and_inputs() + return config, inputs_dict + + def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): + model = PegasusXModel(config=config).get_decoder().to(torch_device).eval() + input_ids = inputs_dict["input_ids"] + attention_mask = inputs_dict["attention_mask"] + + # first forward pass + outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) + + output, past_key_values = outputs.to_tuple() + + # create hypothetical multiple next token and extent to next_input_ids + next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) + next_attn_mask = ids_tensor((self.batch_size, 3), 2) + + # append to next input_ids and + next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) + next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) + + output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] + output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ + "last_hidden_state" + ] + + # select random slice + random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() + output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() + output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() + + self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) + + # test that outputs are equal for slice + self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2)) + + def check_encoder_decoder_model_standalone(self, config, inputs_dict): + model = PegasusXModel(config=config).to(torch_device).eval() + outputs = model(**inputs_dict) + + encoder_last_hidden_state = outputs.encoder_last_hidden_state + last_hidden_state = outputs.last_hidden_state + + with tempfile.TemporaryDirectory() as tmpdirname: + encoder = model.get_encoder() + encoder.save_pretrained(tmpdirname) + encoder = PegasusXEncoder.from_pretrained(tmpdirname).to(torch_device) + + encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[ + 0 + ] + + self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) + + with tempfile.TemporaryDirectory() as tmpdirname: + decoder = model.get_decoder() + decoder.save_pretrained(tmpdirname) + decoder = PegasusXDecoder.from_pretrained(tmpdirname).to(torch_device) + + last_hidden_state_2 = decoder( + input_ids=inputs_dict["decoder_input_ids"], + attention_mask=inputs_dict["decoder_attention_mask"], + encoder_hidden_states=encoder_last_hidden_state, + encoder_attention_mask=inputs_dict["attention_mask"], + )[0] + + self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) + + +@require_torch +class PegasusXModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): + all_model_classes = (PegasusXModel, PegasusXForConditionalGeneration) if is_torch_available() else () + all_generative_model_classes = (PegasusXForConditionalGeneration,) if is_torch_available() else () + is_encoder_decoder = True + test_pruning = False + test_head_masking = False + test_missing_keys = False + + def setUp(self): + self.model_tester = PegasusXModelTester(self) + self.config_tester = ConfigTester(self, config_class=PegasusXConfig) + + def test_config(self): + self.config_tester.run_common_tests() + + def test_save_load_strict(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs() + for model_class in self.all_model_classes: + model = model_class(config) + + with tempfile.TemporaryDirectory() as tmpdirname: + model.save_pretrained(tmpdirname) + model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) + self.assertEqual(info["missing_keys"], []) + + def test_decoder_model_past_with_large_inputs(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) + + def test_encoder_decoder_model_standalone(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() + self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) + + def test_inputs_embeds(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in (PegasusXModel, PegasusXForConditionalGeneration): + model = model_class(config) + model.to(torch_device) + model.eval() + + inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) + + if not self.is_encoder_decoder: + input_ids = inputs["input_ids"] + del inputs["input_ids"] + else: + encoder_input_ids = inputs["input_ids"] + decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) + del inputs["input_ids"] + inputs.pop("decoder_input_ids", None) + + wte = model.get_input_embeddings() + if not self.is_encoder_decoder: + inputs["inputs_embeds"] = wte(input_ids) + else: + inputs["inputs_embeds"] = wte(encoder_input_ids) + inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) + + with torch.no_grad(): + model(**inputs)[0] + + def test_generate_fp16(self): + config, input_dict = self.model_tester.prepare_config_and_inputs() + input_ids = input_dict["input_ids"] + attention_mask = input_ids.ne(1).to(torch_device) + model = PegasusXForConditionalGeneration(config).eval().to(torch_device) + if torch_device == "cuda": + model.half() + model.generate(input_ids, attention_mask=attention_mask) + model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) + + def test_attention_outputs(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + config.return_dict = True + + seq_len = getattr(self.model_tester, "seq_length", None) + decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) + encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) + decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) + encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) + chunk_length = getattr(self.model_tester, "chunk_length", None) + if chunk_length is not None and hasattr(self.model_tester, "num_hashes"): + encoder_seq_length = encoder_seq_length * self.model_tester.num_hashes + + for model_class in self.all_model_classes: + inputs_dict["output_attentions"] = True + inputs_dict["output_hidden_states"] = False + config.return_dict = True + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions + self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) + + # check that output_attentions also work using config + del inputs_dict["output_attentions"] + config.output_attentions = True + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions + self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) + + self.assertListEqual( + list(attentions[0]["local"].shape[-4:]), + [ + self.model_tester.num_attention_heads, + math.ceil(encoder_seq_length / model.config.block_size), + model.config.block_size, + model.config.block_size + model.config.num_global_tokens, + ], + ) + out_len = len(outputs) + + if self.is_encoder_decoder: + correct_outlen = 5 + + # loss is at first position + if "labels" in inputs_dict: + correct_outlen += 1 # loss is added to beginning + if "past_key_values" in outputs: + correct_outlen += 1 # past_key_values have been returned + + self.assertEqual(out_len, correct_outlen) + + # decoder attentions + decoder_attentions = outputs.decoder_attentions + self.assertIsInstance(decoder_attentions, (list, tuple)) + self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) + self.assertListEqual( + list(decoder_attentions[0].shape[-3:]), + [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], + ) + + # cross attentions + cross_attentions = outputs.cross_attentions + self.assertIsInstance(cross_attentions, (list, tuple)) + self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) + self.assertListEqual( + list(cross_attentions[0].shape[-3:]), + [ + self.model_tester.num_attention_heads, + decoder_seq_length, + encoder_key_length, + ], + ) + + # Check attention is always last and order is fine + inputs_dict["output_attentions"] = True + inputs_dict["output_hidden_states"] = True + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + if hasattr(self.model_tester, "num_hidden_states_types"): + added_hidden_states = self.model_tester.num_hidden_states_types + elif self.is_encoder_decoder: + added_hidden_states = 2 + else: + added_hidden_states = 1 + self.assertEqual(out_len + added_hidden_states, len(outputs)) + + self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions + + self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) + self.assertListEqual( + list(self_attentions[0]["local"].shape[-4:]), + [ + self.model_tester.num_attention_heads, + math.ceil(encoder_seq_length / model.config.block_size), + model.config.block_size, + model.config.block_size + model.config.num_global_tokens, + ], + ) + + def _check_encoder_attention_for_generate(self, attentions, batch_size, config, seq_length): + encoder_expected_shape = ( + batch_size, + config.num_attention_heads, + math.ceil(seq_length / config.block_size), + config.block_size, + config.block_size + config.num_global_tokens, + ) + self.assertIsInstance(attentions, tuple) + self.assertListEqual( + [layer_attentions["local"].shape for layer_attentions in attentions], + [encoder_expected_shape] * len(attentions), + ) + + def _check_encoder_hidden_states_for_generate(self, hidden_states, batch_size, config, seq_length): + encoder_expected_shape = (batch_size, self.round_up(seq_length, config.block_size), config.hidden_size) + self.assertIsInstance(hidden_states, tuple) + # Only the last layer will have the hidden states truncated back to token level + self.assertListEqual( + [layer_hidden_states.shape for layer_hidden_states in hidden_states[:-1]], + [encoder_expected_shape] * (len(hidden_states) - 1), + ) + # Only the last layer will have the hidden states truncated back to token level + self.assertEqual( + hidden_states[-1][0].shape, + (batch_size, seq_length, config.hidden_size), + ) + + def test_hidden_states_output(self): + def _check_hidden_states_output(inputs_dict, config, model_class): + model = model_class(config) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states + + expected_num_layers = getattr( + self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 + ) + self.assertEqual(len(hidden_states), expected_num_layers) + + if hasattr(self.model_tester, "encoder_seq_length"): + seq_length = self.model_tester.encoder_seq_length + if hasattr(self.model_tester, "chunk_length") and self.model_tester.chunk_length > 1: + seq_length = seq_length * self.model_tester.chunk_length + else: + seq_length = self.model_tester.seq_length + + self.assertListEqual( + list(hidden_states[0].shape[-2:]), + [self.round_up(seq_length, config.block_size), self.model_tester.hidden_size], + ) + + if config.is_encoder_decoder: + hidden_states = outputs.decoder_hidden_states + + self.assertIsInstance(hidden_states, (list, tuple)) + self.assertEqual(len(hidden_states), expected_num_layers) + seq_len = getattr(self.model_tester, "seq_length", None) + decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) + + self.assertListEqual( + list(hidden_states[0].shape[-2:]), + [decoder_seq_length, self.model_tester.hidden_size], + ) + + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + inputs_dict["output_hidden_states"] = True + _check_hidden_states_output(inputs_dict, config, model_class) + + # check that output_hidden_states also work using config + del inputs_dict["output_hidden_states"] + config.output_hidden_states = True + + _check_hidden_states_output(inputs_dict, config, model_class) + + def test_retain_grad_hidden_states_attentions(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + config.output_hidden_states = True + config.output_attentions = self.has_attentions + + # no need to test all models as different heads yield the same functionality + model_class = self.all_model_classes[0] + model = model_class(config) + model.to(torch_device) + + inputs = self._prepare_for_class(inputs_dict, model_class) + + outputs = model(**inputs) + + output = outputs[0] + + if config.is_encoder_decoder: + # Seq2Seq models + encoder_hidden_states = outputs.encoder_hidden_states[0] + encoder_hidden_states.retain_grad() + + decoder_hidden_states = outputs.decoder_hidden_states[0] + decoder_hidden_states.retain_grad() + + if self.has_attentions: + encoder_attentions = outputs.encoder_attentions[0] + encoder_attentions["local"].retain_grad() + encoder_attentions["global"].retain_grad() + + decoder_attentions = outputs.decoder_attentions[0] + decoder_attentions.retain_grad() + + cross_attentions = outputs.cross_attentions[0] + cross_attentions.retain_grad() + + output.flatten()[0].backward(retain_graph=True) + + self.assertIsNotNone(encoder_hidden_states.grad) + self.assertIsNotNone(decoder_hidden_states.grad) + + if self.has_attentions: + self.assertIsNotNone(encoder_attentions["local"].grad) + self.assertIsNotNone(encoder_attentions["global"].grad) + self.assertIsNotNone(decoder_attentions.grad) + self.assertIsNotNone(cross_attentions.grad) + else: + # Encoder-/Decoder-only models + hidden_states = outputs.hidden_states[0] + hidden_states.retain_grad() + + if self.has_attentions: + attentions = outputs.attentions[0] + attentions.retain_grad() + + output.flatten()[0].backward(retain_graph=True) + + self.assertIsNotNone(hidden_states.grad) + + if self.has_attentions: + self.assertIsNotNone(attentions.grad) + + @classmethod + def round_up(cls, n, k): + return math.ceil(n / k) * k + + +def assert_tensors_close(a, b, atol=1e-12, prefix=""): + """If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error.""" + if a is None and b is None: + return True + try: + if torch.allclose(a, b, atol=atol): + return True + raise + except Exception: + pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item() + if a.numel() > 100: + msg = f"tensor values are {pct_different:.1%} percent different." + else: + msg = f"{a} != {b}" + if prefix: + msg = prefix + ": " + msg + raise AssertionError(msg) + + +def _long_tensor(tok_lst): + return torch.tensor(tok_lst, dtype=torch.long, device=torch_device) + + +TOLERANCE = 1e-4 + + +@require_torch +@require_sentencepiece +@require_tokenizers +@slow +class PegasusXModelIntegrationTests(unittest.TestCase): + @cached_property + def default_tokenizer(self): + return PegasusTokenizer.from_pretrained("google/pegasus-x-base") + + def test_inference_no_head(self): + model = PegasusXModel.from_pretrained("pegasus-x-base").to(torch_device) + input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) + decoder_input_ids = _long_tensor([[2, 0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588]]) + inputs_dict = prepare_pegasus_x_inputs_dict(model.config, input_ids, decoder_input_ids) + with torch.no_grad(): + output = model(**inputs_dict)[0] + expected_shape = torch.Size((1, 11, 1024)) + self.assertEqual(output.shape, expected_shape) + # change to expected output here + expected_slice = torch.tensor( + [[0.7144, 0.8143, -1.2813], [0.7144, 0.8143, -1.2813], [-0.0467, 2.5911, -2.1845]], device=torch_device + ) + self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE)) + + def test_inference_head(self): + model = PegasusXForConditionalGeneration.from_pretrained("pegasus-x-base").to(torch_device) + + # change to intended input + input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) + decoder_input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) + inputs_dict = prepare_pegasus_x_inputs_dict(model.config, input_ids, decoder_input_ids) + with torch.no_grad(): + output = model(**inputs_dict)[0] + expected_shape = torch.Size((1, 11, model.config.vocab_size)) + self.assertEqual(output.shape, expected_shape) + # change to expected output here + expected_slice = torch.tensor( + [[0.7144, 0.8143, -1.2813], [0.7144, 0.8143, -1.2813], [-0.0467, 2.5911, -2.1845]], device=torch_device + ) + self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE)) + + def test_seq_to_seq_generation(self): + hf = PegasusXForConditionalGeneration.from_pretrained("google/pegasus-x-base").to(torch_device) + tok = PegasusTokenizer.from_pretrained("google/pegasus-x-large") + + batch_input = [ + "While large pretrained Transformer models have proven highly capable at tackling natural language tasks," + " handling long sequence inputs continues to be a significant challenge. One such task is long input" + " summarization, where inputs are longer than the maximum input context of most pretrained models. Through" + " an extensive set of experiments, we investigate what model architectural changes and pretraining" + " paradigms can most efficiently adapt a pretrained Transformer for long input summarization. We find that" + " a staggered, block-local Transformer with global encoder tokens strikes a good balance of performance" + " and efficiency, and that an additional pretraining phase on long sequences meaningfully improves" + " downstream summarization performance. Based on our findings, we introduce PEGASUS-X, an extension of the" + " PEGASUS model with additional long input pretraining to handle inputs of up to 16K tokens. PEGASUS-X" + " achieves strong performance on long input summarization tasks comparable with much larger models while" + " adding few additional parameters and not requiring model parallelism to train." + ] + + # The below article tests that we don't add any hypotheses outside of the top n_beams + dct = tok.batch_encode_plus( + batch_input, + max_length=512, + padding="max_length", + truncation_strategy="only_first", + truncation=True, + return_tensors="pt", + ) + + hypotheses_batch = hf.generate( + input_ids=dct["input_ids"].to(torch_device), + attention_mask=dct["attention_mask"].to(torch_device), + num_beams=2, + max_length=32, + ) + + EXPECTED = [ + "we investigate the performance of a new pretrained model for long input summarization. the model" + ] + + generated = tok.batch_decode( + hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True + ) + assert generated == EXPECTED + + +class PegasusXStandaloneDecoderModelTester: + def __init__( + self, + parent, + vocab_size=99, + batch_size=13, + d_model=16, + decoder_seq_length=7, + is_training=True, + is_decoder=True, + use_attention_mask=True, + use_cache=False, + use_labels=True, + decoder_start_token_id=2, + decoder_ffn_dim=32, + decoder_layers=4, + encoder_attention_heads=4, + decoder_attention_heads=4, + max_position_embeddings=30, + is_encoder_decoder=False, + pad_token_id=0, + bos_token_id=1, + eos_token_id=2, + scope=None, + ): + self.parent = parent + self.batch_size = batch_size + self.decoder_seq_length = decoder_seq_length + # For common tests + self.seq_length = self.decoder_seq_length + self.is_training = is_training + self.use_attention_mask = use_attention_mask + self.use_labels = use_labels + + self.vocab_size = vocab_size + self.d_model = d_model + self.hidden_size = d_model + self.num_hidden_layers = decoder_layers + self.decoder_layers = decoder_layers + self.decoder_ffn_dim = decoder_ffn_dim + self.encoder_attention_heads = encoder_attention_heads + self.decoder_attention_heads = decoder_attention_heads + self.num_attention_heads = decoder_attention_heads + self.eos_token_id = eos_token_id + self.bos_token_id = bos_token_id + self.pad_token_id = pad_token_id + self.decoder_start_token_id = decoder_start_token_id + self.use_cache = use_cache + self.max_position_embeddings = max_position_embeddings + self.is_encoder_decoder = is_encoder_decoder + + self.scope = None + self.decoder_key_length = decoder_seq_length + self.base_model_out_len = 2 + self.decoder_attention_idx = 1 + + def prepare_config_and_inputs(self): + input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) + + attention_mask = None + if self.use_attention_mask: + attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) + + lm_labels = None + if self.use_labels: + lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) + + config = PegasusXConfig( + vocab_size=self.vocab_size, + d_model=self.d_model, + decoder_layers=self.decoder_layers, + decoder_ffn_dim=self.decoder_ffn_dim, + encoder_attention_heads=self.encoder_attention_heads, + decoder_attention_heads=self.decoder_attention_heads, + eos_token_id=self.eos_token_id, + bos_token_id=self.bos_token_id, + use_cache=self.use_cache, + pad_token_id=self.pad_token_id, + decoder_start_token_id=self.decoder_start_token_id, + max_position_embeddings=self.max_position_embeddings, + is_encoder_decoder=self.is_encoder_decoder, + ) + + return ( + config, + input_ids, + attention_mask, + lm_labels, + ) + + def create_and_check_decoder_model_past( + self, + config, + input_ids, + attention_mask, + lm_labels, + ): + config.use_cache = True + model = PegasusXDecoder(config=config).to(torch_device).eval() + # first forward pass + outputs = model(input_ids, use_cache=True) + outputs_use_cache_conf = model(input_ids) + outputs_no_past = model(input_ids, use_cache=False) + + self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) + self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) + + past_key_values = outputs["past_key_values"] + + # create hypothetical next token and extent to next_input_ids + next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) + + # append to next input_ids and + next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) + + output_from_no_past = model(next_input_ids)["last_hidden_state"] + output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] + + # select random slice + random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() + output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() + output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() + + # test that outputs are equal for slice + assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) + + def create_and_check_decoder_model_attention_mask_past( + self, + config, + input_ids, + attention_mask, + lm_labels, + ): + model = PegasusXDecoder(config=config).to(torch_device).eval() + + # create attention mask + attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) + + half_seq_length = input_ids.shape[-1] // 2 + attn_mask[:, half_seq_length:] = 0 + + # first forward pass + past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"] + + # create hypothetical next token and extent to next_input_ids + next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) + + # change a random masked slice from input_ids + random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 + random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) + input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens + + # append to next input_ids and attn_mask + next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) + attn_mask = torch.cat( + [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], + dim=1, + ) + + # get two different outputs + output_from_no_past = model(next_input_ids)["last_hidden_state"] + output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] + + # select random slice + random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() + output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() + output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() + + # test that outputs are equal for slice + assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + ( + config, + input_ids, + attention_mask, + lm_labels, + ) = config_and_inputs + + inputs_dict = { + "input_ids": input_ids, + "attention_mask": attention_mask, + } + return config, inputs_dict + + +@require_torch +class PegasusXStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): + all_model_classes = (PegasusXDecoder,) if is_torch_available() else () + all_generative_model_classes = () + test_pruning = False + is_encoder_decoder = False + test_head_masking = False + + def setUp( + self, + ): + self.model_tester = PegasusXStandaloneDecoderModelTester(self, is_training=False) + self.config_tester = ConfigTester(self, config_class=PegasusXConfig) + + def test_config(self): + self.config_tester.run_common_tests() + + def test_decoder_model_past(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) + + def test_decoder_model_attn_mask_past(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) + + def test_retain_grad_hidden_states_attentions(self): + # decoder cannot keep gradients + return diff --git a/utils/check_repo.py b/utils/check_repo.py index c3ecfbebe48ceb..a9beda0c372709 100644 --- a/utils/check_repo.py +++ b/utils/check_repo.py @@ -82,6 +82,9 @@ "MvpEncoder", # Building part of bigger (tested) model. "PegasusEncoder", # Building part of bigger (tested) model. "PegasusDecoderWrapper", # Building part of bigger (tested) model. + "PegasusXEncoder", # Building part of bigger (tested) model. + "PegasusXDecoder", # Building part of bigger (tested) model. + "PegasusXDecoderWrapper", # Building part of bigger (tested) model. "DPREncoder", # Building part of bigger (tested) model. "ProphetNetDecoderWrapper", # Building part of bigger (tested) model. "RealmBertModel", # Building part of bigger (tested) model. @@ -125,6 +128,12 @@ # should **not** be the rule. IGNORE_NON_AUTO_CONFIGURED = PRIVATE_MODELS.copy() + [ # models to ignore for model xxx mapping + "PegasusXEncoder", + "PegasusXDecoder", + "PegasusXDecoderWrapper", + "PegasusXEncoder", + "PegasusXDecoder", + "PegasusXDecoderWrapper", "DPTForDepthEstimation", "DecisionTransformerGPT2Model", "GLPNForDepthEstimation", From bf9d5061378eeb46f9e37ee07fc0a5295ca9b730 Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Fri, 2 Sep 2022 12:29:06 -0700 Subject: [PATCH 211/539] Update docs landing page (#18590) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * 📝 update docs landing page * 🖍 apply feedbacks * apply feedbacks * apply feedbacks, use
for list --- docs/source/en/index.mdx | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/docs/source/en/index.mdx b/docs/source/en/index.mdx index a9bd25cffb222a..15ac6aa50aefd9 100644 --- a/docs/source/en/index.mdx +++ b/docs/source/en/index.mdx @@ -12,18 +12,18 @@ specific language governing permissions and limitations under the License. # 🤗 Transformers -State-of-the-art Machine Learning for PyTorch, TensorFlow and JAX. +State-of-the-art Machine Learning for [PyTorch](https://pytorch.org/), [TensorFlow](https://www.tensorflow.org/), and [JAX](https://jax.readthedocs.io/en/latest/). -🤗 Transformers provides APIs to easily download and train state-of-the-art pretrained models. Using pretrained models can reduce your compute costs, carbon footprint, and save you time from training a model from scratch. The models can be used across different modalities such as: +🤗 Transformers provides APIs and tools to easily download and train state-of-the-art pretrained models. Using pretrained models can reduce your compute costs, carbon footprint, and save you the time and resources required to train a model from scratch. These models support common tasks in different modalities, such as: -* 📝 Text: text classification, information extraction, question answering, summarization, translation, and text generation in over 100 languages. -* 🖼️ Images: image classification, object detection, and segmentation. -* 🗣️ Audio: speech recognition and audio classification. -* 🐙 Multimodal: table question answering, optical character recognition, information extraction from scanned documents, video classification, and visual question answering. +📝 **Natural Language Processing**: text classification, named entity recognition, question answering, language modeling, summarization, translation, multiple choice, and text generation.
+🖼️ **Computer Vision**: image classification, object detection, and segmentation.
+🗣️ **Audio**: automatic speech recognition and audio classification.
+🐙 **Multimodal**: table question answering, optical character recognition, information extraction from scanned documents, video classification, and visual question answering. -Our library supports seamless integration between three of the most popular deep learning libraries: [PyTorch](https://pytorch.org/), [TensorFlow](https://www.tensorflow.org/) and [JAX](https://jax.readthedocs.io/en/latest/). Train your model in three lines of code in one framework, and load it for inference with another. +🤗 Transformers support framework interoperability between PyTorch, TensorFlow, and JAX. This provides the flexibility to use a different framework at each stage of a model's life; train a model in three lines of code in one framework, and load it for inference in another. Models can also be exported to a format like ONNX and TorchScript for deployment in production environments. -Each 🤗 Transformers architecture is defined in a standalone Python module so they can be easily customized for research and experiments. +Join the growing community on the [Hub](https://huggingface.co/models), [forum](https://discuss.huggingface.co/), or [Discord](https://discord.com/invite/JfAtkvEtRb) today! ## If you are looking for custom support from the Hugging Face team @@ -33,19 +33,17 @@ Each 🤗 Transformers architecture is defined in a standalone Python module so ## Contents -The documentation is organized in five parts: +The documentation is organized into five sections: -- **GET STARTED** contains a quick tour and installation instructions to get up and running with 🤗 Transformers. -- **TUTORIALS** are a great place to begin if you are new to our library. This section will help you gain the basic skills you need to start using 🤗 Transformers. -- **HOW-TO GUIDES** will show you how to achieve a specific goal like fine-tuning a pretrained model for language modeling or how to create a custom model head. -- **CONCEPTUAL GUIDES** provides more discussion and explanation of the underlying concepts and ideas behind models, tasks, and the design philosophy of 🤗 Transformers. -- **API** describes each class and function, grouped in: +- **GET STARTED** provides a quick tour of the library and installation instructions to get up and running. +- **TUTORIALS** are a great place to start if you're a beginner. This section will help you gain the basic skills you need to start using the library. +- **HOW-TO GUIDES** show you how to achieve a specific goal, like finetuning a pretrained model for language modeling or how to write and share a custom model. +- **CONCEPTUAL GUIDES** offers more discussion and explanation of the underlying concepts and ideas behind models, tasks, and the design philosophy of 🤗 Transformers. +- **API** describes all classes and functions: - - **MAIN CLASSES** for the main classes exposing the important APIs of the library. - - **MODELS** for the classes and functions related to each model implemented in the library. - - **INTERNAL HELPERS** for the classes and functions we use internally. - -The library currently contains JAX, PyTorch and TensorFlow implementations, pretrained model weights, usage scripts and conversion utilities for the following models. + - **MAIN CLASSES** details the most important classes like configuration, model, tokenizer, and pipeline. + - **MODELS** details the classes and functions related to each model implemented in the library. + - **INTERNAL HELPERS** details utility classes and functions used internally. ### Supported models From ae32f3afefcd3288df0af47d8499ae6024c66612 Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Fri, 2 Sep 2022 12:29:51 -0700 Subject: [PATCH 212/539] Finetune guide for semantic segmentation (#18640) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * 📝 first draft * oops add to toctree * make style * 📝 add inference section * 🖍 make style * 📝 add images * 🖍 apply feedbacks * remove num_labels and pytorch block * apply feedbacks, add colab notebook Co-authored-by: Steven --- docs/source/en/_toctree.yml | 2 + .../source/en/tasks/semantic_segmentation.mdx | 286 ++++++++++++++++++ 2 files changed, 288 insertions(+) create mode 100644 docs/source/en/tasks/semantic_segmentation.mdx diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index afb501b96e3058..3353e82ad1e42c 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -69,6 +69,8 @@ - sections: - local: tasks/image_classification title: Image classification + - local: tasks/semantic_segmentation + title: Semantic segmentation title: Computer Vision - sections: - local: performance diff --git a/docs/source/en/tasks/semantic_segmentation.mdx b/docs/source/en/tasks/semantic_segmentation.mdx new file mode 100644 index 00000000000000..c288449552d100 --- /dev/null +++ b/docs/source/en/tasks/semantic_segmentation.mdx @@ -0,0 +1,286 @@ + + +# Semantic segmentation + +[[open-in-colab]] + + + +Semantic segmentation assigns a label or class to each individual pixel of an image. There are several types of segmentation, and in the case of semantic segmentation, no distinction is made between unique instances of the same object. Both objects are given the same label (for example, "car" instead of "car-1" and "car-2"). Common real-world applications of semantic segmentation include training self-driving cars to identify pedestrians and important traffic information, identifying cells and abnormalities in medical imagery, and monitoring environmental changes from satellite imagery. + +This guide will show you how to finetune [SegFormer](https://huggingface.co/docs/transformers/main/en/model_doc/segformer#segformer) on the [SceneParse150](https://huggingface.co/datasets/scene_parse_150) dataset. + + + +See the image segmentation [task page](https://huggingface.co/tasks/image-segmentation) for more information about its associated models, datasets, and metrics. + + + +Before you begin, make sure you have all the necessary libraries installed: + +```bash +pip install -q datasets transformers evaluate +``` + +## Load SceneParse150 dataset + +Load the first 50 examples of the SceneParse150 dataset from the 🤗 Datasets library so you can quickly train and test a model: + +```py +>>> from datasets import load_dataset + +>>> ds = load_dataset("scene_parse_150", split="train[:50]") +``` + +Split this dataset into a train and test set: + +```py +>>> ds = ds.train_test_split(test_size=0.2) +>>> train_ds = ds["train"] +>>> test_ds = ds["test"] +``` + +Then take a look at an example: + +```py +>>> train_ds[0] +{'image': , + 'annotation': , + 'scene_category': 368} +``` + +There is an `image`, an `annotation` (this is the segmentation map or label), and a `scene_category` field that describes the image scene, like "kitchen" or "office". In this guide, you'll only need `image` and `annotation`, both of which are PIL images. + +You'll also want to create a dictionary that maps a label id to a label class which will be useful when you set up the model later. Download the mappings from the Hub and create the `id2label` and `label2id` dictionaries: + +```py +>>> import json +>>> from huggingface_hub import cached_download, hf_hub_url + +>>> repo_id = "datasets/huggingface/label-files" +>>> filename = "ade20k-id2label.json" +>>> id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename)), "r")) +>>> id2label = {int(k): v for k, v in id2label.items()} +>>> label2id = {v: k for k, v in id2label.items()} +>>> num_labels = len(id2label) +``` + +## Preprocess + +Next, load a SegFormer feature extractor to prepare the images and annotations for the model. Some datasets, like this one, use the zero-index as the background class. However, the background class isn't included in the 150 classes, so you'll need to set `reduce_labels=True` to subtract one from all the labels. The zero-index is replaced by `255` so it's ignored by SegFormer's loss function: + +```py +>>> from transformers import AutoFeatureExtractor + +>>> feature_extractor = AutoFeatureExtractor.from_pretrained("nvidia/mit-b0", reduce_labels=True) +``` + +It is common to apply some data augmentations to an image dataset to make a model more robust against overfitting. In this guide, you'll use the [`ColorJitter`](https://pytorch.org/vision/stable/generated/torchvision.transforms.ColorJitter.html) function from [torchvision](https://pytorch.org/vision/stable/index.html) to randomly change the color properties of an image: + +```py +>>> from torchvision.transforms import ColorJitter + +>>> jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1) +``` + +Now create two preprocessing functions to prepare the images and annotations for the model. These functions convert the images into `pixel_values` and annotations to `labels`. For the training set, `jitter` is applied before providing the images to the feature extractor. For the test set, the feature extractor crops and normalizes the `images`, and only crops the `labels` because no data augmentation is applied during testing. + +```py +>>> def train_transforms(example_batch): +... images = [jitter(x) for x in example_batch["image"]] +... labels = [x for x in example_batch["annotation"]] +... inputs = feature_extractor(images, labels) +... return inputs + + +>>> def val_transforms(example_batch): +... images = [x for x in example_batch["image"]] +... labels = [x for x in example_batch["annotation"]] +... inputs = feature_extractor(images, labels) +... return inputs +``` + +To apply the `jitter` over the entire dataset, use the 🤗 Datasets [`~datasets.Dataset.set_transform`] function. The transform is applied on the fly which is faster and consumes less disk space: + +```py +>>> train_ds.set_transform(train_transforms) +>>> test_ds.set_transform(val_transforms) +``` + +## Train + +Load SegFormer with [`AutoModelForSemanticSegmentation`], and pass the model the mapping between label ids and label classes: + +```py +>>> from transformers import AutoModelForSemanticSegmentation + +>>> pretrained_model_name = "nvidia/mit-b0" +>>> model = AutoModelForSemanticSegmentation.from_pretrained( +... pretrained_model_name, id2label=id2label, label2id=label2id +... ) +``` + + + +If you aren't familiar with finetuning a model with the [`Trainer`], take a look at the basic tutorial [here](../training#finetune-with-trainer)! + + + +Define your training hyperparameters in [`TrainingArguments`]. It is important not to remove unused columns because this will drop the `image` column. Without the `image` column, you can't create `pixel_values`. Set `remove_unused_columns=False` to prevent this behavior! + +To save and push a model under your namespace to the Hub, set `push_to_hub=True`: + +```py +>>> from transformers import TrainingArguments + +>>> training_args = TrainingArguments( +... output_dir="segformer-b0-scene-parse-150", +... learning_rate=6e-5, +... num_train_epochs=50, +... per_device_train_batch_size=2, +... per_device_eval_batch_size=2, +... save_total_limit=3, +... evaluation_strategy="steps", +... save_strategy="steps", +... save_steps=20, +... eval_steps=20, +... logging_steps=1, +... eval_accumulation_steps=5, +... remove_unused_columns=False, +... push_to_hub=True, +... ) +``` + +To evaluate model performance during training, you'll need to create a function to compute and report metrics. For semantic segmentation, you'll typically compute the [mean Intersection over Union](https://huggingface.co/spaces/evaluate-metric/mean_iou) (IoU). The mean IoU measures the overlapping area between the predicted and ground truth segmentation maps. + +Load the mean IoU from the 🤗 Evaluate library: + +```py +>>> import evaluate + +>>> metric = evaluate.load("mean_iou") +``` + +Then create a function to [`~evaluate.EvaluationModule.compute`] the metrics. Your predictions need to be converted to logits first, and then reshaped to match the size of the labels before you can call [`~evaluate.EvaluationModule.compute`]: + +```py +>>> def compute_metrics(eval_pred): +... with torch.no_grad(): +... logits, labels = eval_pred +... logits_tensor = torch.from_numpy(logits) +... logits_tensor = nn.functional.interpolate( +... logits_tensor, +... size=labels.shape[-2:], +... mode="bilinear", +... align_corners=False, +... ).argmax(dim=1) + +... pred_labels = logits_tensor.detach().cpu().numpy() +... metrics = metric.compute( +... predictions=pred_labels, +... references=labels, +... num_labels=num_labels, +... ignore_index=255, +... reduce_labels=False, +... ) +... for key, value in metrics.items(): +... if type(value) is np.ndarray: +... metrics[key] = value.tolist() +... return metrics +``` + +Pass your model, training arguments, datasets, and metrics function to the [`Trainer`]: + +```py +>>> from transformers import Trainer + +>>> trainer = Trainer( +... model=model, +... args=training_args, +... train_dataset=train_ds, +... eval_dataset=test_ds, +... compute_metrics=compute_metrics, +... ) +``` + +Lastly, call [`~Trainer.train`] to finetune your model: + +```py +>>> trainer.train() +``` + +## Inference + +Great, now that you've finetuned a model, you can use it for inference! + +Load an image for inference: + +```py +>>> image = ds[0]["image"] +>>> image +``` + +
+ Image of bedroom +
+ +Process the image with a feature extractor and place the `pixel_values` on a GPU: + +```py +>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # use GPU if available, otherwise use a CPU +>>> encoding = feature_extractor(image, return_tensors="pt") +>>> pixel_values = encoding.pixel_values.to(device) +``` + +Pass your input to the model and return the `logits`: + +```py +>>> outputs = model(pixel_values=pixel_values) +>>> logits = outputs.logits.cpu() +``` + +Next, rescale the logits to the original image size: + +```py +>>> upsampled_logits = nn.functional.interpolate( +... logits, +... size=image.size[::-1], +... mode="bilinear", +... align_corners=False, +... ) + +>>> pred_seg = upsampled_logits.argmax(dim=1)[0] +``` + +To visualize the results, load the [dataset color palette](https://github.com/tensorflow/models/blob/3f1ca33afe3c1631b733ea7e40c294273b9e406d/research/deeplab/utils/get_dataset_colormap.py#L51) that maps each class to their RGB values. Then you can combine and plot your image and the predicted segmentation map: + +```py +>>> import matplotlib.pyplot as plt + +>>> color_seg = np.zeros((pred_seg.shape[0], pred_seg.shape[1], 3), dtype=np.uint8) +>>> palette = np.array(ade_palette()) +>>> for label, color in enumerate(palette): +... color_seg[pred_seg == label, :] = color +>>> color_seg = color_seg[..., ::-1] # convert to BGR + +>>> img = np.array(image) * 0.5 + color_seg * 0.5 # plot the image with the segmentation map +>>> img = img.astype(np.uint8) + +>>> plt.figure(figsize=(15, 10)) +>>> plt.imshow(img) +>>> plt.show() +``` + +
+ Image of bedroom overlayed with segmentation map +
\ No newline at end of file From 65fb71bc762c46bb067306c1fd083b1cba87a095 Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Fri, 2 Sep 2022 13:05:31 -0700 Subject: [PATCH 213/539] Add Trainer to quicktour (#18723) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * 📝 update quicktour * 📝 add trainer section * 🖍 markdown table, apply feedbacks * ✨ make style * add tf training section * make style --- docs/source/en/quicktour.mdx | 273 +++++++++++++++++++++++------------ 1 file changed, 183 insertions(+), 90 deletions(-) diff --git a/docs/source/en/quicktour.mdx b/docs/source/en/quicktour.mdx index c5b333bf5694fe..f1b3ca5bf0f688 100644 --- a/docs/source/en/quicktour.mdx +++ b/docs/source/en/quicktour.mdx @@ -14,53 +14,15 @@ specific language governing permissions and limitations under the License. [[open-in-colab]] -Get up and running with 🤗 Transformers! Start using the [`pipeline`] for rapid inference, and quickly load a pretrained model and tokenizer with an [AutoClass](./model_doc/auto) to solve your text, vision or audio task. +Get up and running with 🤗 Transformers! Whether you're a developer or an everyday user, this quick tour will help you get started and show you how to use the [`pipeline`] for inference, load a pretrained model and preprocessor with an [AutoClass](./model_doc/auto), and quickly train a model with PyTorch or TensorFlow. If you're a beginner, we recommend checking out our tutorials or [course](https://huggingface.co/course/chapter1/1) next for more in-depth explanations of the concepts introduced here. - - -All code examples presented in the documentation have a toggle on the top left for PyTorch and TensorFlow. If -not, the code is expected to work for both backends without any change. - - - -## Pipeline - -[`pipeline`] is the easiest way to use a pretrained model for a given task. - - - -The [`pipeline`] supports many common tasks out-of-the-box: - -**Text**: -* Sentiment analysis: classify the polarity of a given text. -* Text generation (in English): generate text from a given input. -* Name entity recognition (NER): label each word with the entity it represents (person, date, location, etc.). -* Question answering: extract the answer from the context, given some context and a question. -* Fill-mask: fill in the blank given a text with masked words. -* Summarization: generate a summary of a long sequence of text or document. -* Translation: translate text into another language. -* Feature extraction: create a tensor representation of the text. +Before you begin, make sure you have all the necessary libraries installed: -**Image**: -* Image classification: classify an image. -* Image segmentation: classify every pixel in an image. -* Object detection: detect objects within an image. - -**Audio**: -* Audio classification: assign a label to a given segment of audio. -* Automatic speech recognition (ASR): transcribe audio data into text. - - - -For more details about the [`pipeline`] and associated tasks, refer to the documentation [here](./main_classes/pipelines). - - - -### Pipeline usage - -In the following example, you will use the [`pipeline`] for sentiment analysis. +```bash +!pip install transformers datasets +``` -Install the following dependencies if you haven't already: +You'll also need to install your preferred machine learning framework: @@ -75,7 +37,29 @@ pip install tensorflow
-Import [`pipeline`] and specify the task you want to complete: +## Pipeline + + + +The [`pipeline`] is the easiest way to use a pretrained model for inference. You can use the [`pipeline`] out-of-the-box for many tasks across different modalities. Take a look at the table below for some supported tasks: + +| **Task** | **Description** | **Modality** | **Pipeline identifier** | +|------------------------------|--------------------------------------------------------------------------------------------------------------|-----------------|-----------------------------------------------| +| Text classification | assign a label to a given sequence of text | NLP | pipeline(task="sentiment-analysis") | +| Text generation | generate text that follows a given prompt | NLP | pipeline(task="text-generation") | +| Name entity recognition | assign a label to each token in a sequence (people, organization, location, etc.) | NLP | pipeline(task="ner") | +| Question answering | extract an answer from the text given some context and a question | NLP | pipeline(task="question-answering") | +| Fill-mask | predict the correct masked token in a sequence | NLP | pipeline(task="fill-mask") | +| Summarization | generate a summary of a sequence of text or document | NLP | pipeline(task="summarization") | +| Translation | translate text from one language into another | NLP | pipeline(task="translation") | +| Image classification | assign a label to an image | Computer vision | pipeline(task="image-classification") | +| Image segmentation | assign a label to each individual pixel of an image (supports semantic, panoptic, and instance segmentation) | Computer vision | pipeline(task="image-segmentation") | +| Object detection | predict the bounding boxes and classes of objects in an image | Computer vision | pipeline(task="object-detection") | +| Audio classification | assign a label to an audio file | Audio | pipeline(task="audio-classification") | +| Automatic speech recognition | extract speech from an audio file into text | Audio | pipeline(task="automatic-speech-recognition") | +| Visual question answering | given an image and a question, correctly answer a question about the image | Multimodal | pipeline(task="vqa") | + +Start by creating an instance of [`pipeline`] and specifying a task you want to use it for. You can use the [`pipeline`] for any of the previously mentioned tasks, and for a complete list of supported tasks, take a look at the [pipeline API reference](./main_classes/pipelines). In this guide though, you'll use the [`pipeline`] for sentiment analysis as an example: ```py >>> from transformers import pipeline @@ -83,14 +67,14 @@ Import [`pipeline`] and specify the task you want to complete: >>> classifier = pipeline("sentiment-analysis") ``` -The pipeline downloads and caches a default [pretrained model](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) and tokenizer for sentiment analysis. Now you can use the `classifier` on your target text: +The [`pipeline`] downloads and caches a default [pretrained model](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) and tokenizer for sentiment analysis. Now you can use the `classifier` on your target text: ```py >>> classifier("We are very happy to show you the 🤗 Transformers library.") [{'label': 'POSITIVE', 'score': 0.9998}] ``` -For more than one sentence, pass a list of sentences to the [`pipeline`] which returns a list of dictionaries: +If you have more than one input, pass your inputs as a list to the [`pipeline`] to return a list of dictionaries: ```py >>> results = classifier(["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."]) @@ -100,13 +84,7 @@ label: POSITIVE, with score: 0.9998 label: NEGATIVE, with score: 0.5309 ``` -The [`pipeline`] can also iterate over an entire dataset. Start by installing the [🤗 Datasets](https://huggingface.co/docs/datasets/) library: - -```bash -pip install datasets -``` - -Create a [`pipeline`] with the task you want to solve for and the model you want to use. +The [`pipeline`] can also iterate over an entire dataset for any task you like. For this example, let's choose automatic speech recognition as our task: ```py >>> import torch @@ -115,7 +93,7 @@ Create a [`pipeline`] with the task you want to solve for and the model you want >>> speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h") ``` -Next, load a dataset (see the 🤗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart.html) for more details) you'd like to iterate over. For example, let's load the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset: +Load an audio dataset (see the 🤗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart#audio) for more details) you'd like to iterate over. For example, load the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset: ```py >>> from datasets import load_dataset, Audio @@ -123,15 +101,15 @@ Next, load a dataset (see the 🤗 Datasets [Quick Start](https://huggingface.co >>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") # doctest: +IGNORE_RESULT ``` -We need to make sure that the sampling rate of the dataset matches the sampling -rate `facebook/wav2vec2-base-960h` was trained on. +You need to make sure the sampling rate of the dataset matches the sampling +rate [`facebook/wav2vec2-base-960h`](https://huggingface.co/facebook/wav2vec2-base-960h) was trained on: ```py >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=speech_recognizer.feature_extractor.sampling_rate)) ``` -Audio files are automatically loaded and resampled when calling the `"audio"` column. -Let's extract the raw waveform arrays of the first 4 samples and pass it as a list to the pipeline: +The audio files are automatically loaded and resampled when calling the `"audio"` column. +Extract the raw waveform arrays from the first 4 samples and pass it as a list to the pipeline: ```py >>> result = speech_recognizer(dataset[:4]["audio"]) @@ -139,11 +117,11 @@ Let's extract the raw waveform arrays of the first 4 samples and pass it as a li ['I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT', "FODING HOW I'D SET UP A JOIN TO HET WITH MY WIFE AND WHERE THE AP MIGHT BE", "I I'D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I'M NOT SEEING THE OPTION TO DO IT ON THE AP SO I CALLED IN TO GET SOME HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AND I'M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS", 'HOW DO I THURN A JOIN A COUNT'] ``` -For a larger dataset where the inputs are big (like in speech or vision), you will want to pass along a generator instead of a list that loads all the inputs in memory. See the [pipeline documentation](./main_classes/pipelines) for more information. +For larger datasets where the inputs are big (like in speech or vision), you'll want to pass a generator instead of a list to load all the inputs in memory. Take a look at the [pipeline API reference](./main_classes/pipelines) for more information. ### Use another model and tokenizer in the pipeline -The [`pipeline`] can accommodate any model from the [Model Hub](https://huggingface.co/models), making it easy to adapt the [`pipeline`] for other use-cases. For example, if you'd like a model capable of handling French text, use the tags on the Model Hub to filter for an appropriate model. The top filtered result returns a multilingual [BERT model](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment) fine-tuned for sentiment analysis. Great, let's use this model! +The [`pipeline`] can accommodate any model from the [Hub](https://huggingface.co/models), making it easy to adapt the [`pipeline`] for other use-cases. For example, if you'd like a model capable of handling French text, use the tags on the Hub to filter for an appropriate model. The top filtered result returns a multilingual [BERT model](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment) finetuned for sentiment analysis you can use for French text: ```py >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" @@ -151,7 +129,7 @@ The [`pipeline`] can accommodate any model from the [Model Hub](https://huggingf -Use the [`AutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the pretrained model and it's associated tokenizer (more on an `AutoClass` below): +Use [`AutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the pretrained model and it's associated tokenizer (more on an `AutoClass` in the next section): ```py >>> from transformers import AutoTokenizer, AutoModelForSequenceClassification @@ -161,7 +139,7 @@ Use the [`AutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the ``` -Use the [`TFAutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the pretrained model and it's associated tokenizer (more on an `TFAutoClass` below): +Use [`TFAutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the pretrained model and it's associated tokenizer (more on an `TFAutoClass` in the next section): ```py >>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification @@ -172,7 +150,7 @@ Use the [`TFAutoModelForSequenceClassification`] and [`AutoTokenizer`] to load t -Then you can specify the model and tokenizer in the [`pipeline`], and apply the `classifier` on your target text: +Specify the model and tokenizer in the [`pipeline`], and now you can apply the `classifier` on French text: ```py >>> classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer) @@ -180,19 +158,19 @@ Then you can specify the model and tokenizer in the [`pipeline`], and apply the [{'label': '5 stars', 'score': 0.7273}] ``` -If you can't find a model for your use-case, you will need to fine-tune a pretrained model on your data. Take a look at our [fine-tuning tutorial](./training) to learn how. Finally, after you've fine-tuned your pretrained model, please consider sharing it (see tutorial [here](./model_sharing)) with the community on the Model Hub to democratize NLP for everyone! 🤗 +If you can't find a model for your use-case, you'll need to finetune a pretrained model on your data. Take a look at our [finetuning tutorial](./training) to learn how. Finally, after you've finetuned your pretrained model, please consider [sharing](./model_sharing) the model with the community on the Hub to democratize machine learning for everyone! 🤗 ## AutoClass -Under the hood, the [`AutoModelForSequenceClassification`] and [`AutoTokenizer`] classes work together to power the [`pipeline`]. An [AutoClass](./model_doc/auto) is a shortcut that automatically retrieves the architecture of a pretrained model from it's name or path. You only need to select the appropriate `AutoClass` for your task and it's associated tokenizer with [`AutoTokenizer`]. +Under the hood, the [`AutoModelForSequenceClassification`] and [`AutoTokenizer`] classes work together to power the [`pipeline`] you used above. An [AutoClass](./model_doc/auto) is a shortcut that automatically retrieves the architecture of a pretrained model from it's name or path. You only need to select the appropriate `AutoClass` for your task and it's associated preprocessing class. -Let's return to our example and see how you can use the `AutoClass` to replicate the results of the [`pipeline`]. +Let's return to the example from the previous section and see how you can use the `AutoClass` to replicate the results of the [`pipeline`]. ### AutoTokenizer -A tokenizer is responsible for preprocessing text into a format that is understandable to the model. First, the tokenizer will split the text into words called *tokens*. There are multiple rules that govern the tokenization process, including how to split a word and at what level (learn more about tokenization [here](./tokenizer_summary)). The most important thing to remember though is you need to instantiate the tokenizer with the same model name to ensure you're using the same tokenization rules a model was pretrained with. +A tokenizer is responsible for preprocessing text into an array of numbers as inputs to a model. There are multiple rules that govern the tokenization process, including how to split a word and at what level words should be split (learn more about tokenization in the [tokenizer summary](./tokenizer_summary)). The most important thing to remember is you need to instantiate a tokenizer with the same model name to ensure you're using the same tokenization rules a model was pretrained with. Load a tokenizer with [`AutoTokenizer`]: @@ -203,8 +181,6 @@ Load a tokenizer with [`AutoTokenizer`]: >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` -Next, the tokenizer converts the tokens into numbers in order to construct a tensor as input to the model. This is known as the model's *vocabulary*. - Pass your text to the tokenizer: ```py @@ -215,12 +191,12 @@ Pass your text to the tokenizer: 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` -The tokenizer will return a dictionary containing: +The tokenizer returns a dictionary containing: * [input_ids](./glossary#input-ids): numerical representions of your tokens. * [atttention_mask](.glossary#attention-mask): indicates which tokens should be attended to. -Just like the [`pipeline`], the tokenizer will accept a list of inputs. In addition, the tokenizer can also pad and truncate the text to return a batch with uniform length: +A tokenizer can also accept a list of inputs, and pad and truncate the text to return a batch with uniform length: @@ -247,13 +223,17 @@ Just like the [`pipeline`], the tokenizer will accept a list of inputs. In addit -Read the [preprocessing](./preprocessing) tutorial for more details about tokenization. + + +Check out the [preprocess](./preprocessing) tutorial for more details about tokenization, and how to use an [`AutoFeatureExtractor`] and [`AutoProcessor`] to preprocess image, audio, and multimodal inputs. + + ### AutoModel -🤗 Transformers provides a simple and unified way to load pretrained instances. This means you can load an [`AutoModel`] like you would load an [`AutoTokenizer`]. The only difference is selecting the correct [`AutoModel`] for the task. Since you are doing text - or sequence - classification, load [`AutoModelForSequenceClassification`]: +🤗 Transformers provides a simple and unified way to load pretrained instances. This means you can load an [`AutoModel`] like you would load an [`AutoTokenizer`]. The only difference is selecting the correct [`AutoModel`] for the task. For text (or sequence) classification, you should load [`AutoModelForSequenceClassification`]: ```py >>> from transformers import AutoModelForSequenceClassification @@ -264,11 +244,11 @@ Read the [preprocessing](./preprocessing) tutorial for more details about tokeni -See the [task summary](./task_summary) for which [`AutoModel`] class to use for which task. +See the [task summary](./task_summary) for tasks supported by an [`AutoModel`] class. -Now you can pass your preprocessed batch of inputs directly to the model. You just have to unpack the dictionary by adding `**`: +Now pass your preprocessed batch of inputs directly to the model. You just have to unpack the dictionary by adding `**`: ```py >>> pt_outputs = pt_model(**pt_batch) @@ -286,7 +266,7 @@ tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725], ``` -🤗 Transformers provides a simple and unified way to load pretrained instances. This means you can load an [`TFAutoModel`] like you would load an [`AutoTokenizer`]. The only difference is selecting the correct [`TFAutoModel`] for the task. Since you are doing text - or sequence - classification, load [`TFAutoModelForSequenceClassification`]: +🤗 Transformers provides a simple and unified way to load pretrained instances. This means you can load an [`TFAutoModel`] like you would load an [`AutoTokenizer`]. The only difference is selecting the correct [`TFAutoModel`] for the task. For text (or sequence) classification, you should load [`TFAutoModelForSequenceClassification`]: ```py >>> from transformers import TFAutoModelForSequenceClassification @@ -297,11 +277,11 @@ tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725], -See the [task summary](./task_summary) for which [`AutoModel`] class to use for which task. +See the [task summary](./task_summary) for tasks supported by an [`AutoModel`] class. -Now you can pass your preprocessed batch of inputs directly to the model by passing the dictionary keys directly to the tensors: +Now pass your preprocessed batch of inputs directly to the model by passing the dictionary keys directly to the tensors: ```py >>> tf_outputs = tf_model(tf_batch) @@ -320,17 +300,8 @@ The model outputs the final activations in the `logits` attribute. Apply the sof -All 🤗 Transformers models (PyTorch or TensorFlow) outputs the tensors *before* the final activation -function (like softmax) because the final activation function is often fused with the loss. - - - -Models are a standard [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) or a [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) so you can use them in your usual training loop. However, to make things easier, 🤗 Transformers provides a [`Trainer`] class for PyTorch that adds functionality for distributed training, mixed precision, and more. For TensorFlow, you can use the `fit` method from [Keras](https://keras.io/). Refer to the [training tutorial](./training) for more details. - - - -🤗 Transformers model outputs are special dataclasses so their attributes are autocompleted in an IDE. -The model outputs also behave like a tuple or a dictionary (e.g., you can index with an integer, a slice or a string) in which case the attributes that are `None` are ignored. +All 🤗 Transformers models (PyTorch or TensorFlow) output the tensors *before* the final activation +function (like softmax) because the final activation function is often fused with the loss. Model outputs are special dataclasses so their attributes are autocompleted in an IDE. The model outputs behave like a tuple or a dictionary (you can index with an integer, a slice or a string) in which case, attributes that are None are ignored. @@ -425,6 +396,128 @@ Create a model from your custom configuration with [`TFAutoModel.from_config`]: Take a look at the [Create a custom architecture](./create_a_model) guide for more information about building custom configurations. +## Trainer - a PyTorch optimized training loop + +All models are a standard [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) so you can use them in any typical training loop. While you can write your own training loop, 🤗 Transformers provides a [`Trainer`] class for PyTorch, which contains the basic training loop and adds additional functionality for features like distributed training, mixed precision, and more. + +Depending on your task, you'll typically pass the following parameters to [`Trainer`]: + +1. A [`PreTrainedModel`] or a [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module): + + ```py + >>> from transformers import AutoModelForSequenceClassification + + >>> model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased") + ``` + +2. [`TrainingArguments`] contains the model hyperparameters you can change like learning rate, batch size, and the number of epochs to train for. The default values are used if you don't specify any training arguments: + + ```py + >>> from transformers import TrainingArguments + + >>> training_args = TrainingArguments( + ... output_dir="path/to/save/folder/", + ... learning_rate=2e-5, + ... per_device_train_batch_size=8, + ... per_device_eval_batch_size=8, + ... num_train_epochs=2, + ... ) + ``` + +3. A preprocessing class like a tokenizer, feature extractor, or processor: + + ```py + >>> from transformers import AutoTokenizer + + >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased") + ``` + +4. Your preprocessed train and test datasets: + + ```py + >>> train_dataset = dataset["train"] + >>> eval_dataset = dataset["eval"] + ``` + +5. A [`DataCollator`] to create a batch of examples from your dataset: + + ```py + >>> from transformers import DefaultDataCollator + + >>> data_collator = DefaultDataCollator() + ``` + +Now gather all these classes in [`Trainer`]: + +```py +>>> from transformers import Trainer + +>>> trainer = Trainer( +... model=model, +... args=training_args, +... train_dataset=dataset["train"], +... eval_dataset=dataset["test"], +... tokenizer=tokenizer, +... data_collator=data_collator, +... ) +``` + +When you're ready, call [`~Trainer.train`] to start training: + +```py +>>> trainer.train() +``` + + + +For tasks - like translation or summarization - that use a sequence-to-sequence model, use the [`Seq2SeqTrainer`] and [`Seq2SeqTrainingArguments`] classes instead. + + + +You can customize the training loop behavior by subclassing the methods inside [`Trainer`]. This allows you to customize features such as the loss function, optimizer, and scheduler. Take a look at the [`Trainer`] reference for which methods can be subclassed. + +The other way to customize the training loop is by using [Callbacks](./main_classes/callbacks). You can use callbacks to integrate with other libraries and inspect the training loop to report on progress or stop the training early. Callbacks do not modify anything in the training loop itself. To customize something like the loss function, you need to subclass the [`Trainer`] instead. + +## Train with TensorFlow + +All models are a standard [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) so they can be trained in TensorFlow with the [Keras](https://keras.io/) API. 🤗 Transformers provides the [`~TFPreTrainedModel.prepare_tf_dataset`] method to easily load your dataset as a `tf.data.Dataset` so you can start training right away with Keras' [`compile`](https://keras.io/api/models/model_training_apis/#compile-method) and [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) methods. + +1. You'll start with a [`TFPreTrainedModel`] or a [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model): + + ```py + >>> from transformers import TFAutoModelForSequenceClassification + + >>> model = TFAutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased") + ``` + +2. A preprocessing class like a tokenizer, feature extractor, or processor: + + ```py + >>> from transformers import AutoTokenizer + + >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased") + ``` + +3. Tokenize the dataset and pass it and the tokenizer to [`~TFPreTrainedModel.prepare_tf_dataset`]. You can also change the batch size and shuffle the dataset here if you'd like: + + ```py + >>> def tokenize_dataset(dataset): + ... return tokenizer(dataset["text"]) + + + >>> dataset = dataset.map(tokenize_dataset) + >>> tf_dataset = model.prepare_tf_dataset(dataset, batch_size=16, shuffle=True, tokenizer=tokenizer) + ``` + +4. When you're ready, you can call `compile` and `fit` to start training: + + ```py + >>> from tensorflow.keras.optimizers import Adam + + >>> model.compile(optimizer=Adam(3e-5)) + >>> model.fit(dataset) + ``` + ## What's next? Now that you've completed the 🤗 Transformers quick tour, check out our guides and learn how to do more specific things like writing a custom model, fine-tuning a model for a task, and how to train a model with a script. If you're interested in learning more about 🤗 Transformers core concepts, grab a cup of coffee and take a look at our Conceptual Guides! \ No newline at end of file From 7f27e002fd09ffffd2234f40e6d35912a1ee48f0 Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Mon, 5 Sep 2022 09:20:27 +0100 Subject: [PATCH 214/539] TF: TFMarianMTModel final logits bias as a layer (#18833) * bias as a layer * alias the bias (hah, it rhymes) * add comment with info --- .../models/marian/modeling_tf_marian.py | 22 +++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/src/transformers/models/marian/modeling_tf_marian.py b/src/transformers/models/marian/modeling_tf_marian.py index 0c2a0334dbae59..50c96a82b048b2 100644 --- a/src/transformers/models/marian/modeling_tf_marian.py +++ b/src/transformers/models/marian/modeling_tf_marian.py @@ -1269,6 +1269,23 @@ def serving_output(self, output): ) +class BiasLayer(tf.keras.layers.Layer): + """ + Bias as a layer. It is used for serialization purposes: `tf.keras.Model.save_weights` stores on a per-layer basis, + so all weights have to be registered in a layer. + """ + + def __init__(self, shape, initializer, trainable, name, **kwargs): + super().__init__(name=name, **kwargs) + # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of + # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see: + # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214 + self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable) + + def call(self, x): + return x + self.bias + + @add_start_docstrings( "The MARIAN Model with a language modeling head. Can be used for summarization.", MARIAN_START_DOCSTRING, @@ -1284,9 +1301,10 @@ def __init__(self, config, *inputs, **kwargs): self.model = TFMarianMainLayer(config, name="model") self.use_cache = config.use_cache # final_bias_logits is registered as a buffer in pytorch, so not trainable for the sake of consistency. - self.final_logits_bias = self.add_weight( + self.bias_layer = BiasLayer( name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False ) + self.final_logits_bias = self.bias_layer.bias # alias to keep the same interface with PT def get_decoder(self): return self.model.decoder @@ -1373,7 +1391,7 @@ def call( training=training, ) lm_logits = self.model.shared(outputs[0], mode="linear") - lm_logits = lm_logits + self.final_logits_bias + lm_logits = self.bias_layer(lm_logits) masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) if not return_dict: From 591cfc6c90e363d27389180ef33229645bc80063 Mon Sep 17 00:00:00 2001 From: Lysandre Debut Date: Mon, 5 Sep 2022 11:09:39 +0200 Subject: [PATCH 215/539] Mention TF and Flax checkpoints (#18894) --- docs/source/en/autoclass_tutorial.mdx | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/source/en/autoclass_tutorial.mdx b/docs/source/en/autoclass_tutorial.mdx index 246b4b9b2e2ad8..00ad93881bc217 100644 --- a/docs/source/en/autoclass_tutorial.mdx +++ b/docs/source/en/autoclass_tutorial.mdx @@ -99,6 +99,8 @@ Easily reuse the same checkpoint to load an architecture for a different task: For PyTorch models, the `from_pretrained()` method uses `torch.load()` which internally uses `pickle` and is known to be insecure. In general, never load a model that could have come from an untrusted source, or that could have been tampered with. This security risk is partially mitigated for public models hosted on the Hugging Face Hub, which are [scanned for malware](https://huggingface.co/docs/hub/security-malware) at each commit. See the [Hub documentation](https://huggingface.co/docs/hub/security) for best practices like [signed commit verification](https://huggingface.co/docs/hub/security-gpg#signing-commits-with-gpg) with GPG. +TensorFlow and Flax checkpoints are not affected, and can be loaded within PyTorch architectures using the `from_tf` and `from_flax` kwargs for the `from_pretrained` method to circumvent this issue. +
Generally, we recommend using the `AutoTokenizer` class and the `AutoModelFor` class to load pretrained instances of models. This will ensure you load the correct architecture every time. In the next [tutorial](preprocessing), learn how to use your newly loaded tokenizer, feature extractor and processor to preprocess a dataset for fine-tuning. From badb9d2aaa58df2fddc09a868d8e3e5655b101a3 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Mon, 5 Sep 2022 11:25:00 +0200 Subject: [PATCH 216/539] Correct naming pegasus x (#18896) * add first generation tutorial * [Pegasus X] correct naming * [Generation] Remove --- tests/models/pegasus_x/test_modeling_pegasus_x.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/models/pegasus_x/test_modeling_pegasus_x.py b/tests/models/pegasus_x/test_modeling_pegasus_x.py index 17518ebe993ddc..2fade61ff76176 100644 --- a/tests/models/pegasus_x/test_modeling_pegasus_x.py +++ b/tests/models/pegasus_x/test_modeling_pegasus_x.py @@ -559,7 +559,7 @@ def default_tokenizer(self): return PegasusTokenizer.from_pretrained("google/pegasus-x-base") def test_inference_no_head(self): - model = PegasusXModel.from_pretrained("pegasus-x-base").to(torch_device) + model = PegasusXModel.from_pretrained("google/pegasus-x-base").to(torch_device) input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) decoder_input_ids = _long_tensor([[2, 0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588]]) inputs_dict = prepare_pegasus_x_inputs_dict(model.config, input_ids, decoder_input_ids) @@ -574,7 +574,7 @@ def test_inference_no_head(self): self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE)) def test_inference_head(self): - model = PegasusXForConditionalGeneration.from_pretrained("pegasus-x-base").to(torch_device) + model = PegasusXForConditionalGeneration.from_pretrained("google/pegasus-x-base").to(torch_device) # change to intended input input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) From 17c634fd5b6c24396faf780b9b069beca1289d84 Mon Sep 17 00:00:00 2001 From: Surya Prakash Sahu Date: Mon, 5 Sep 2022 17:36:36 +0530 Subject: [PATCH 217/539] Update perf_train_gpu_one.mdx (#18442) --- docs/source/en/perf_train_gpu_one.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/en/perf_train_gpu_one.mdx b/docs/source/en/perf_train_gpu_one.mdx index 38d8534d85c840..767a48f280e1dd 100644 --- a/docs/source/en/perf_train_gpu_one.mdx +++ b/docs/source/en/perf_train_gpu_one.mdx @@ -288,7 +288,7 @@ Even when we set the batch size to 1 and use gradient accumulation we can still Gradient checkpointing strikes a compromise between the two approaches and saves strategically selected activations throughout the computational graph so only a fraction of the activations need to be re-computed for the gradients. See [this great article](https://medium.com/tensorflow/fitting-larger-networks-into-memory-583e3c758ff9) explaining the ideas behind gradient checkpointing. -To enable gradient checkpointing in the [`Trainer`] we only need ot pass it as a flag to the [`TrainingArguments`]. Everything else is handled under the hood: +To enable gradient checkpointing in the [`Trainer`] we only need to pass it as a flag to the [`TrainingArguments`]. Everything else is handled under the hood: ```py training_args = TrainingArguments( @@ -425,7 +425,7 @@ $ python examples/pytorch/translation/run_translation.py -h | grep "\-optim" For example, if you have [NVIDIA/apex](https://github.com/NVIDIA/apex) installed `--optim adamw_apex_fused` will give you the fastest training experience among all supported AdamW optimizers. -On the other hand [8bit BNB optimizer](https://github.com/facebookresearch/bitsandbytes) can save 3/4 of memory normally used by a typical AdamW optimizer if it is configured to quantize all optimizer states, but in some situations only some optimizer states are quintized and then more memory is used. XXX: update once https://github.com/huggingface/transformers/pull/15622 is merged. +On the other hand [8bit BNB optimizer](https://github.com/facebookresearch/bitsandbytes) can save 3/4 of memory normally used by a typical AdamW optimizer if it is configured to quantize all optimizer states, but in some situations only some optimizer states are quintized and then more memory is used. Let's get a feel for the numbers and use for example use a 3B-parameter model, like `t5-3b`. Note that since a Gigabyte correpsonds to a billion bytes we can simply multiply the parameters (in billions) with the number of necessary bytes per parameter to get Gigabytes of GPU memory usage: From cfd623a859890c6d106610d3c688064eadc7bd61 Mon Sep 17 00:00:00 2001 From: Sofia Oliveira <74454835+asofiaoliveira@users.noreply.github.com> Date: Mon, 5 Sep 2022 13:38:08 +0100 Subject: [PATCH 218/539] Add type hints to XLM-Roberta-XL models (#18475) * Add type hints to XLM-Roberta-XL models * Format --- .../xlm_roberta_xl/modeling_xlm_roberta_xl.py | 146 +++++++++--------- 1 file changed, 73 insertions(+), 73 deletions(-) diff --git a/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py b/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py index aa41466767d688..ca1c35bf650586 100644 --- a/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py +++ b/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py @@ -881,21 +881,21 @@ def set_output_embeddings(self, new_embeddings): @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, - input_ids=None, - attention_mask=None, - token_type_ids=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - labels=None, - past_key_values=None, - use_cache=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - ): + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if @@ -1040,19 +1040,19 @@ def set_output_embeddings(self, new_embeddings): ) def forward( self, - input_ids=None, - attention_mask=None, - token_type_ids=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - labels=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - ): + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., @@ -1152,17 +1152,17 @@ def __init__(self, config): ) def forward( self, - input_ids=None, - attention_mask=None, - token_type_ids=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - labels=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - ): + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., @@ -1250,17 +1250,17 @@ def __init__(self, config): ) def forward( self, - input_ids=None, - token_type_ids=None, - attention_mask=None, - labels=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - ): + input_ids: Optional[torch.LongTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, MultipleChoiceModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., @@ -1347,17 +1347,17 @@ def __init__(self, config): ) def forward( self, - input_ids=None, - attention_mask=None, - token_type_ids=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - labels=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - ): + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. @@ -1458,18 +1458,18 @@ def __init__(self, config): ) def forward( self, - input_ids=None, - attention_mask=None, - token_type_ids=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - start_positions=None, - end_positions=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - ): + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + start_positions: Optional[torch.LongTensor] = None, + end_positions: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. From c6d3daba54b9c67a4696eeeea919c185a5fa4e6e Mon Sep 17 00:00:00 2001 From: zkep <36965534+zkep@users.noreply.github.com> Date: Tue, 6 Sep 2022 01:56:12 +0800 Subject: [PATCH 219/539] Update Chinese documentation (#18893) * update the translation --- README_zh-hans.md | 2 +- README_zh-hant.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README_zh-hans.md b/README_zh-hans.md index 1141748161510d..516f66ff96e633 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -173,7 +173,7 @@ checkpoint: 检查点 - 对所有模型统一的API 1. 更低计算开销,更少的碳排放: - - 研究人员可以分享亿训练的模型而非次次从头开始训练 + - 研究人员可以分享已训练的模型而非每次从头开始训练 - 工程师可以减少计算用时和生产环境开销 - 数十种模型架构、两千多个预训练模型、100多种语言支持 diff --git a/README_zh-hant.md b/README_zh-hant.md index c0444b8ba6a1bc..8b4c15dd13160d 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -185,7 +185,7 @@ Tokenizer 為所有的預訓練模型提供了預處理,並可以直接轉換 - 對所有模型使用的制式化API 1. 更低的運算成本,更少的碳排放: - - 研究人員可以分享預訓練的模型而非從頭開始訓練 + - 研究人員可以分享已訓練的模型而非每次從頭開始訓練 - 工程師可以減少計算時間以及生產成本 - 數十種模型架構、兩千多個預訓練模型、100多種語言支援 From d4dbd7ca59bd50dd034e7995cb36e5efed3d9512 Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Mon, 5 Sep 2022 19:35:47 +0100 Subject: [PATCH 220/539] Generate: get the correct beam index on eos token (#18851) --- src/transformers/generation_beam_search.py | 2 +- tests/generation/test_generation_beam_search.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/transformers/generation_beam_search.py b/src/transformers/generation_beam_search.py index e0514edafbde83..7c50c0d7acdccc 100644 --- a/src/transformers/generation_beam_search.py +++ b/src/transformers/generation_beam_search.py @@ -259,7 +259,7 @@ def process( continue if beam_indices is not None: beam_index = beam_indices[batch_beam_idx] - beam_index = beam_index + (next_index,) + beam_index = beam_index + (batch_beam_idx,) else: beam_index = None diff --git a/tests/generation/test_generation_beam_search.py b/tests/generation/test_generation_beam_search.py index 885cefa62cbd51..66bfc29b5467dd 100644 --- a/tests/generation/test_generation_beam_search.py +++ b/tests/generation/test_generation_beam_search.py @@ -172,7 +172,7 @@ def cut_expected_tensor(tensor): input_ids[correct_idx].tolist(), beam_scorer._beam_hyps[batch_idx].beams[0][1].tolist() ) self.parent.assertListEqual( - expected_beam_indices + [next_indices[batch_idx, 1].item()], + expected_beam_indices + [correct_idx], torch.tensor(beam_scorer._beam_hyps[batch_idx].beams[0][2]).tolist(), ) From 734b7e2a5a717ccdebbda3f98da7ea947b417b65 Mon Sep 17 00:00:00 2001 From: Had Date: Tue, 6 Sep 2022 08:39:31 +0000 Subject: [PATCH 221/539] Mask t5 relative position bias then head pruned (#17968) * add position bias head masking if heads pruned * fix pruning function in t5 encoder * make style * make fix-copies * Revert added folder Co-authored-by: Patrick von Platen --- src/transformers/models/longt5/modeling_longt5.py | 9 ++++++++- src/transformers/models/t5/modeling_t5.py | 11 +++++++++-- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/src/transformers/models/longt5/modeling_longt5.py b/src/transformers/models/longt5/modeling_longt5.py index 3a8fb9f66a09fc..114b6564b524af 100644 --- a/src/transformers/models/longt5/modeling_longt5.py +++ b/src/transformers/models/longt5/modeling_longt5.py @@ -518,7 +518,14 @@ def project(hidden_states, proj_layer, key_value_states, past_key_value): if mask is not None: position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length) - scores += position_bias + if self.pruned_heads: + mask = torch.ones(position_bias.shape[1]) + mask[list(self.pruned_heads)] = 0 + position_bias_masked = position_bias[:, mask.bool()] + else: + position_bias_masked = position_bias + + scores += position_bias_masked attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as( scores ) # (batch_size, n_heads, seq_length, key_length) diff --git a/src/transformers/models/t5/modeling_t5.py b/src/transformers/models/t5/modeling_t5.py index e4c36109bd7709..1e70ba773aca48 100644 --- a/src/transformers/models/t5/modeling_t5.py +++ b/src/transformers/models/t5/modeling_t5.py @@ -528,7 +528,14 @@ def project(hidden_states, proj_layer, key_value_states, past_key_value): if mask is not None: position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length) - scores += position_bias + if self.pruned_heads: + mask = torch.ones(position_bias.shape[1]) + mask[list(self.pruned_heads)] = 0 + position_bias_masked = position_bias[:, mask.bool()] + else: + position_bias_masked = position_bias + + scores += position_bias_masked attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as( scores ) # (batch_size, n_heads, seq_length, key_length) @@ -1802,7 +1809,7 @@ def _prune_heads(self, heads_to_prune): class PreTrainedModel """ for layer, heads in heads_to_prune.items(): - self.encoder.layer[layer].attention.prune_heads(heads) + self.encoder.block[layer].layer[0].SelfAttention.prune_heads(heads) @add_start_docstrings_to_model_forward(T5_ENCODER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) From 3b19c0317b6909e2d7f11b5053895ac55250e7da Mon Sep 17 00:00:00 2001 From: arun99481 Date: Tue, 6 Sep 2022 17:06:37 +0530 Subject: [PATCH 222/539] updating gather function with gather_for_metrics in run_wav2vec2_pretraining (#18877) Co-authored-by: Arun Rajaram --- .../run_wav2vec2_pretraining_no_trainer.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/examples/pytorch/speech-pretraining/run_wav2vec2_pretraining_no_trainer.py b/examples/pytorch/speech-pretraining/run_wav2vec2_pretraining_no_trainer.py index a3db215d08bd4b..0de1776df56d61 100755 --- a/examples/pytorch/speech-pretraining/run_wav2vec2_pretraining_no_trainer.py +++ b/examples/pytorch/speech-pretraining/run_wav2vec2_pretraining_no_trainer.py @@ -596,7 +596,7 @@ def prepare_dataset(batch): # make sure that `num_losses` is summed for distributed training # and average gradients over losses of all devices if accelerator.state.num_processes > 1: - num_losses = accelerator.gather(num_losses).sum() + num_losses = accelerator.gather_for_metrics(num_losses).sum() gradient_multiplier = accelerator.state.num_processes / num_losses multiply_grads(model.module.parameters(), gradient_multiplier) else: @@ -647,10 +647,10 @@ def prepare_dataset(batch): outputs.diversity_loss.detach() if accelerator.state.num_processes > 1: - loss = accelerator.gather(loss).sum() - outputs.contrastive_loss = accelerator.gather(outputs.contrastive_loss).sum() - outputs.diversity_loss = accelerator.gather(outputs.diversity_loss).sum() - percent_masked = accelerator.gather(percent_masked).sum() + loss = accelerator.gather_for_metrics(loss).sum() + outputs.contrastive_loss = accelerator.gather_for_metrics(outputs.contrastive_loss).sum() + outputs.diversity_loss = accelerator.gather_for_metrics(outputs.diversity_loss).sum() + percent_masked = accelerator.gather_for_metrics(percent_masked).sum() train_logs = { "loss": (loss * args.gradient_accumulation_steps) / num_losses, @@ -713,7 +713,7 @@ def prepare_dataset(batch): # sum over devices in multi-processing if accelerator.num_processes > 1: - val_logs = {k: accelerator.gather(v).sum() for k, v in val_logs.items()} + val_logs = {k: accelerator.gather_for_metrics(v).sum() for k, v in val_logs.items()} val_logs = {k: v / val_logs["val_num_losses"] for k, v in val_logs.items()} From f85acb4d73a84fe9bee5279068b0430fc391fb36 Mon Sep 17 00:00:00 2001 From: Ekagra Ranjan Date: Tue, 6 Sep 2022 17:42:26 +0530 Subject: [PATCH 223/539] Fix decode_input_ids to bare T5Model and improve doc (#18791) * use tokenizer to output tensor * add preprocessing for decoder_input_ids for bare T5Model * add preprocessing to tf and flax * linting * linting * Update src/transformers/models/t5/modeling_flax_t5.py Co-authored-by: Patrick von Platen * Update src/transformers/models/t5/modeling_tf_t5.py Co-authored-by: Patrick von Platen * Update src/transformers/models/t5/modeling_t5.py Co-authored-by: Patrick von Platen Co-authored-by: Patrick von Platen --- docs/source/en/model_doc/t5.mdx | 7 +++++-- src/transformers/models/t5/modeling_flax_t5.py | 4 ++++ src/transformers/models/t5/modeling_t5.py | 4 ++++ src/transformers/models/t5/modeling_tf_t5.py | 4 ++++ 4 files changed, 17 insertions(+), 2 deletions(-) diff --git a/docs/source/en/model_doc/t5.mdx b/docs/source/en/model_doc/t5.mdx index 5a1928923476cb..92cd753b645767 100644 --- a/docs/source/en/model_doc/t5.mdx +++ b/docs/source/en/model_doc/t5.mdx @@ -187,12 +187,15 @@ ignored. The code example below illustrates all of this. >>> # encode the targets >>> target_encoding = tokenizer( -... [output_sequence_1, output_sequence_2], padding="longest", max_length=max_target_length, truncation=True +... [output_sequence_1, output_sequence_2], +... padding="longest", +... max_length=max_target_length, +... truncation=True, +... return_tensors="pt", ... ) >>> labels = target_encoding.input_ids >>> # replace padding token id's of the labels by -100 so it's ignored by the loss ->>> labels = torch.tensor(labels) >>> labels[labels == tokenizer.pad_token_id] = -100 >>> # forward pass diff --git a/src/transformers/models/t5/modeling_flax_t5.py b/src/transformers/models/t5/modeling_flax_t5.py index 918a605fc4813a..2732bf591690f7 100644 --- a/src/transformers/models/t5/modeling_flax_t5.py +++ b/src/transformers/models/t5/modeling_flax_t5.py @@ -1388,6 +1388,10 @@ class FlaxT5Model(FlaxT5PreTrainedModel): ... ).input_ids >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="np").input_ids + >>> # preprocess: Prepend decoder_input_ids with start token which is pad token for T5Model. + >>> # This is not needed for torch's T5ForConditionalGeneration as it does this internally using labels arg. + >>> decoder_input_ids = model._shift_right(decoder_input_ids) + >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state diff --git a/src/transformers/models/t5/modeling_t5.py b/src/transformers/models/t5/modeling_t5.py index 1e70ba773aca48..8e414cbd7a5738 100644 --- a/src/transformers/models/t5/modeling_t5.py +++ b/src/transformers/models/t5/modeling_t5.py @@ -1383,6 +1383,10 @@ def forward( ... ).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 + >>> # preprocess: Prepend decoder_input_ids with start token which is pad token for T5Model. + >>> # This is not needed for torch's T5ForConditionalGeneration as it does this internally using labels arg. + >>> decoder_input_ids = model._shift_right(decoder_input_ids) + >>> # forward pass >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state diff --git a/src/transformers/models/t5/modeling_tf_t5.py b/src/transformers/models/t5/modeling_tf_t5.py index 091cb9d63eb42d..dc909c8d8f3349 100644 --- a/src/transformers/models/t5/modeling_tf_t5.py +++ b/src/transformers/models/t5/modeling_tf_t5.py @@ -1180,6 +1180,10 @@ def call( ... ).input_ids # Batch size 1 >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="tf").input_ids # Batch size 1 + >>> # preprocess: Prepend decoder_input_ids with start token which is pad token for T5Model. + >>> # This is not needed for torch's T5ForConditionalGeneration as it does this internally using labels arg. + >>> decoder_input_ids = model._shift_right(decoder_input_ids) + >>> # forward pass >>> outputs = model(input_ids, decoder_input_ids=decoder_input_ids) >>> last_hidden_states = outputs.last_hidden_state From 998a90bc7d8b3ecda4b1d0ef17e71afeb6729e2e Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Tue, 6 Sep 2022 14:51:03 +0200 Subject: [PATCH 224/539] Fix `test_tf_encode_plus_sent_to_model` for `LayoutLMv3` (#18898) Co-authored-by: ydshieh --- .../test_tokenization_layoutlmv3.py | 45 ++++++++++++++++++- 1 file changed, 44 insertions(+), 1 deletion(-) diff --git a/tests/models/layoutlmv3/test_tokenization_layoutlmv3.py b/tests/models/layoutlmv3/test_tokenization_layoutlmv3.py index 419b95feee23a6..322ed6861ff693 100644 --- a/tests/models/layoutlmv3/test_tokenization_layoutlmv3.py +++ b/tests/models/layoutlmv3/test_tokenization_layoutlmv3.py @@ -31,7 +31,14 @@ logging, ) from transformers.models.layoutlmv3.tokenization_layoutlmv3 import VOCAB_FILES_NAMES, LayoutLMv3Tokenizer -from transformers.testing_utils import is_pt_tf_cross_test, require_pandas, require_tokenizers, require_torch, slow +from transformers.testing_utils import ( + is_pt_tf_cross_test, + require_pandas, + require_tf, + require_tokenizers, + require_torch, + slow, +) from ...test_tokenization_common import SMALL_TRAINING_CORPUS, TokenizerTesterMixin, merge_model_tokenizer_mappings @@ -2400,3 +2407,39 @@ def test_layoutlmv3_integration_test(self): @unittest.skip("Doesn't support another framework than PyTorch") def test_np_encode_plus_sent_to_model(self): pass + + @require_tf + @slow + def test_tf_encode_plus_sent_to_model(self): + from transformers import TF_MODEL_MAPPING, TOKENIZER_MAPPING + + MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(TF_MODEL_MAPPING, TOKENIZER_MAPPING) + + tokenizers = self.get_tokenizers(do_lower_case=False) + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING: + return + + config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__] + config = config_class() + + if config.is_encoder_decoder or config.pad_token_id is None: + return + + model = model_class(config) + + # Make sure the model contains at least the full vocabulary size in its embedding matrix + self.assertGreaterEqual(model.config.vocab_size, len(tokenizer)) + + # Build sequence + first_ten_tokens = list(tokenizer.get_vocab().keys())[:10] + boxes = [[1000, 1000, 1000, 1000] for _ in range(len(first_ten_tokens))] + encoded_sequence = tokenizer.encode_plus(first_ten_tokens, boxes=boxes, return_tensors="tf") + batch_encoded_sequence = tokenizer.batch_encode_plus( + [first_ten_tokens, first_ten_tokens], boxes=[boxes, boxes], return_tensors="tf" + ) + + # This should not fail + model(encoded_sequence) + model(batch_encoded_sequence) From 6678350c01629b848aa9c41e169da5d6b8d9e7e9 Mon Sep 17 00:00:00 2001 From: Alara Dirik <8944735+alaradirik@users.noreply.github.com> Date: Tue, 6 Sep 2022 16:13:34 +0300 Subject: [PATCH 225/539] fixes bugs to handle non-dict output (#18897) --- src/transformers/models/owlvit/modeling_owlvit.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/transformers/models/owlvit/modeling_owlvit.py b/src/transformers/models/owlvit/modeling_owlvit.py index badd3db41e70d4..2e4a0110921b42 100644 --- a/src/transformers/models/owlvit/modeling_owlvit.py +++ b/src/transformers/models/owlvit/modeling_owlvit.py @@ -1277,7 +1277,7 @@ def image_text_embedder( ) # Resize class token - image_embeds = outputs.image_embeds + image_embeds = outputs[-3] new_size = tuple(np.array(image_embeds.shape) - np.array((0, 1, 0))) class_token_out = torch.broadcast_to(image_embeds[:, :1, :], new_size) @@ -1293,11 +1293,11 @@ def image_text_embedder( image_embeds.shape[-1], ) image_embeds = image_embeds.reshape(new_size) - text_embeds = outputs.text_embeds + text_embeds = outputs[-4] # Last hidden states from text and vision transformers - text_model_last_hidden_state = outputs.text_model_output.last_hidden_state - vision_model_last_hidden_state = outputs.vision_model_output.last_hidden_state + text_model_last_hidden_state = outputs[-2][0] + vision_model_last_hidden_state = outputs[-1][0] return (text_embeds, image_embeds, text_model_last_hidden_state, vision_model_last_hidden_state) From 71ff88fa4f1e4f63ed0890fc20f4a9d65af79af3 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Tue, 6 Sep 2022 12:34:37 -0400 Subject: [PATCH 226/539] Further reduce the number of alls to head for cached objects (#18871) * Further reduce the number of alls to head for cached models/tokenizers/pipelines * Fix tests * Address review comments --- src/transformers/utils/hub.py | 34 +++++++++++++++++++-- tests/models/auto/test_modeling_auto.py | 3 +- tests/models/auto/test_modeling_tf_auto.py | 3 +- tests/models/auto/test_tokenization_auto.py | 3 +- tests/pipelines/test_pipelines_common.py | 3 +- 5 files changed, 36 insertions(+), 10 deletions(-) diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py index 9b1e9a5b85eb02..31c3257ffd3646 100644 --- a/src/transformers/utils/hub.py +++ b/src/transformers/utils/hub.py @@ -120,6 +120,9 @@ def is_offline_mode(): HUGGINGFACE_CO_PREFIX = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/{model_id}/resolve/{revision}/{filename}" HUGGINGFACE_CO_EXAMPLES_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/examples" +# Return value when trying to load a file from cache but the file does not exist in the distant repo. +_CACHED_NO_EXIST = object() + def get_cached_models(cache_dir: Union[str, Path] = None) -> List[Tuple]: """ @@ -222,6 +225,22 @@ def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] def try_to_load_from_cache(cache_dir, repo_id, filename, revision=None, commit_hash=None): """ Explores the cache to return the latest cached file for a given revision. + + Args: + cache_dir (`str` or `os.PathLike`): The folder where the cached files lie. + repo_id (`str`): The ID of the repo on huggingface.co. + filename (`str`): The filename to look for inside `repo_id`. + revision (`str`, *optional*): + The specific model version to use. Will default to `"main"` if it's not provided and no `commit_hash` is + provided either. + commit_hash (`str`, *optional*): The (full) commit hash to look for inside the cache. + + Returns: + `Optional[str]` or `_CACHED_NO_EXIST`: + Will return `None` if the file was not cached. Otherwise: + - The exact path to the cached file if it's found in the cache + - A special value `_CACHED_NO_EXIST` if the file does not exist at the given commit hash and this fact was + cached. """ if commit_hash is not None and revision is not None: raise ValueError("`commit_hash` and `revision` are mutually exclusive, pick one only.") @@ -244,6 +263,9 @@ def try_to_load_from_cache(cache_dir, repo_id, filename, revision=None, commit_h with open(os.path.join(model_cache, "refs", revision)) as f: commit_hash = f.read() + if os.path.isfile(os.path.join(model_cache, ".no_exist", commit_hash, filename)): + return _CACHED_NO_EXIST + cached_shas = os.listdir(os.path.join(model_cache, "snapshots")) if commit_hash not in cached_shas: # No cache for this revision and we won't try to return a random revision @@ -338,7 +360,10 @@ def cached_file( resolved_file = os.path.join(os.path.join(path_or_repo_id, subfolder), filename) if not os.path.isfile(resolved_file): if _raise_exceptions_for_missing_entries: - raise EnvironmentError(f"Could not locate {full_filename} inside {path_or_repo_id}.") + raise EnvironmentError( + f"{path_or_repo_id} does not appear to have a file named {full_filename}. Checkout " + f"'https://huggingface.co/{path_or_repo_id}/{revision}' for available files." + ) else: return None return resolved_file @@ -352,7 +377,12 @@ def cached_file( # If the file is cached under that commit hash, we return it directly. resolved_file = try_to_load_from_cache(cache_dir, path_or_repo_id, full_filename, commit_hash=_commit_hash) if resolved_file is not None: - return resolved_file + if resolved_file is not _CACHED_NO_EXIST: + return resolved_file + elif not _raise_exceptions_for_missing_entries: + return None + else: + raise EnvironmentError(f"Could not locate {full_filename} inside {path_or_repo_id}.") user_agent = http_user_agent(user_agent) try: diff --git a/tests/models/auto/test_modeling_auto.py b/tests/models/auto/test_modeling_auto.py index 2e1e51a81daac6..91222c4d0062ee 100644 --- a/tests/models/auto/test_modeling_auto.py +++ b/tests/models/auto/test_modeling_auto.py @@ -370,6 +370,5 @@ def test_cached_model_has_minimum_calls_to_head(self): with RequestCounter() as counter: _ = AutoModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded") self.assertEqual(counter.get_request_count, 0) - # There is no pytorch_model.bin so we still get one call for this one. - self.assertEqual(counter.head_request_count, 2) + self.assertEqual(counter.head_request_count, 1) self.assertEqual(counter.other_request_count, 0) diff --git a/tests/models/auto/test_modeling_tf_auto.py b/tests/models/auto/test_modeling_tf_auto.py index bbde4f582bdfb0..2b4b625e2305c3 100644 --- a/tests/models/auto/test_modeling_tf_auto.py +++ b/tests/models/auto/test_modeling_tf_auto.py @@ -303,6 +303,5 @@ def test_cached_model_has_minimum_calls_to_head(self): with RequestCounter() as counter: _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded") self.assertEqual(counter.get_request_count, 0) - # There is no pytorch_model.bin so we still get one call for this one. - self.assertEqual(counter.head_request_count, 2) + self.assertEqual(counter.head_request_count, 1) self.assertEqual(counter.other_request_count, 0) diff --git a/tests/models/auto/test_tokenization_auto.py b/tests/models/auto/test_tokenization_auto.py index 830362e29cd654..020eea72cdda21 100644 --- a/tests/models/auto/test_tokenization_auto.py +++ b/tests/models/auto/test_tokenization_auto.py @@ -349,6 +349,5 @@ def test_cached_tokenizer_has_minimum_calls_to_head(self): with RequestCounter() as counter: _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") self.assertEqual(counter.get_request_count, 0) - # We still have one extra call because the model does not have a added_tokens.json file - self.assertEqual(counter.head_request_count, 2) + self.assertEqual(counter.head_request_count, 1) self.assertEqual(counter.other_request_count, 0) diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py index 5e0296c7136725..ea32f5cac4d467 100644 --- a/tests/pipelines/test_pipelines_common.py +++ b/tests/pipelines/test_pipelines_common.py @@ -884,8 +884,7 @@ def test_cached_pipeline_has_minimum_calls_to_head(self): with RequestCounter() as counter: _ = pipeline("text-classification", model="hf-internal-testing/tiny-random-bert") self.assertEqual(counter.get_request_count, 0) - # We still have one extra call because the model does not have a added_tokens.json file - self.assertEqual(counter.head_request_count, 2) + self.assertEqual(counter.head_request_count, 1) self.assertEqual(counter.other_request_count, 0) From 7d5fde991d598370d961be8cb7add6541e2b59ce Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Tue, 6 Sep 2022 18:42:00 +0200 Subject: [PATCH 227/539] unpin slack_sdk version (#18901) Co-authored-by: ydshieh --- .github/workflows/doctests.yml | 2 +- .github/workflows/self-nightly-scheduled.yml | 2 +- .github/workflows/self-past.yml | 2 +- .github/workflows/self-push.yml | 2 +- .github/workflows/self-scheduled.yml | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/doctests.yml b/.github/workflows/doctests.yml index 8a8968be52d582..9fc74e2e6cf8c2 100644 --- a/.github/workflows/doctests.yml +++ b/.github/workflows/doctests.yml @@ -74,5 +74,5 @@ jobs: CI_SLACK_CHANNEL_ID_DAILY: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY_DOCS }} CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }} run: | - pip install slack_sdk==3.18.1 + pip install slack_sdk python utils/notification_service_doc_tests.py diff --git a/.github/workflows/self-nightly-scheduled.yml b/.github/workflows/self-nightly-scheduled.yml index 1cdbd6982b71f7..f1822be911c34b 100644 --- a/.github/workflows/self-nightly-scheduled.yml +++ b/.github/workflows/self-nightly-scheduled.yml @@ -236,5 +236,5 @@ jobs: # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`. run: | - pip install slack_sdk==3.18.1 + pip install slack_sdk python utils/notification_service.py "${{ needs.setup.outputs.matrix }}" diff --git a/.github/workflows/self-past.yml b/.github/workflows/self-past.yml index c9776899775e24..b3871dc92fa47f 100644 --- a/.github/workflows/self-past.yml +++ b/.github/workflows/self-past.yml @@ -180,7 +180,7 @@ jobs: # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`. run: | - pip install slack_sdk==3.18.1 + pip install slack_sdk python utils/notification_service.py "${{ needs.setup.outputs.matrix }}" # Upload complete failure tables, as they might be big and only truncated versions could be sent to Slack. diff --git a/.github/workflows/self-push.yml b/.github/workflows/self-push.yml index f2f88ad1b9a849..56e425570c6b29 100644 --- a/.github/workflows/self-push.yml +++ b/.github/workflows/self-push.yml @@ -533,5 +533,5 @@ jobs: # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`. run: | - pip install slack_sdk==3.18.1 + pip install slack_sdk python utils/notification_service.py "${{ needs.setup.outputs.matrix }}" diff --git a/.github/workflows/self-scheduled.yml b/.github/workflows/self-scheduled.yml index 34380faa4eca1f..82d33babf4ef32 100644 --- a/.github/workflows/self-scheduled.yml +++ b/.github/workflows/self-scheduled.yml @@ -361,5 +361,5 @@ jobs: # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`. run: | - pip install slack_sdk==3.18.1 + pip install slack_sdk python utils/notification_service.py "${{ needs.setup.outputs.matrix }}" From 0a632f076d6b275690176b79c64c5559e1240b05 Mon Sep 17 00:00:00 2001 From: Ekagra Ranjan Date: Wed, 7 Sep 2022 00:50:12 +0530 Subject: [PATCH 228/539] Fix incorrect size of input for 1st strided window length in `Perplexity of fixed-length models` (#18906) * update the PPL for stride 512 * fix 1st strided window size * linting * fix typo * styling --- docs/source/en/perplexity.mdx | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/docs/source/en/perplexity.mdx b/docs/source/en/perplexity.mdx index 3706a40091c2ca..01f861c99c5ea2 100644 --- a/docs/source/en/perplexity.mdx +++ b/docs/source/en/perplexity.mdx @@ -101,22 +101,32 @@ from tqdm import tqdm max_length = model.config.n_positions stride = 512 +seq_len = encodings.input_ids.size(1) nlls = [] -for i in tqdm(range(0, encodings.input_ids.size(1), stride)): - begin_loc = max(i + stride - max_length, 0) - end_loc = min(i + stride, encodings.input_ids.size(1)) - trg_len = end_loc - i # may be different from stride on last loop +prev_end_loc = 0 +for begin_loc in tqdm(range(0, seq_len, stride)): + end_loc = min(begin_loc + max_length, seq_len) + trg_len = end_loc - prev_end_loc # may be different from stride on last loop input_ids = encodings.input_ids[:, begin_loc:end_loc].to(device) target_ids = input_ids.clone() target_ids[:, :-trg_len] = -100 with torch.no_grad(): outputs = model(input_ids, labels=target_ids) - neg_log_likelihood = outputs[0] * trg_len + + # loss is calculated using CrossEntropyLoss which averages over input tokens. + # Multiply it with trg_len to get the summation instead of average. + # We will take average over all the tokens to get the true average + # in the last step of this example. + neg_log_likelihood = outputs.loss * trg_len nlls.append(neg_log_likelihood) + prev_end_loc = end_loc + if end_loc == seq_len: + break + ppl = torch.exp(torch.stack(nlls).sum() / end_loc) ``` @@ -126,5 +136,5 @@ and the better the reported perplexity will typically be. When we run the above with `stride = 1024`, i.e. no overlap, the resulting PPL is `19.64`, which is about the same as the `19.93` reported in the GPT-2 paper. By using `stride = 512` and thereby employing our striding window -strategy, this jumps down to `16.53`. This is not only a more favorable score, but is calculated in a way that is +strategy, this jumps down to `16.44`. This is not only a more favorable score, but is calculated in a way that is closer to the true autoregressive decomposition of a sequence likelihood. From c25f27fa6a2c04fb344a55f817b4976dd823c0c9 Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Wed, 7 Sep 2022 12:24:12 +0200 Subject: [PATCH 229/539] [VideoMAE] Improve code examples (#18919) * Simplify code example * Add seed --- .../models/videomae/modeling_videomae.py | 29 +++++++++---------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/src/transformers/models/videomae/modeling_videomae.py b/src/transformers/models/videomae/modeling_videomae.py index a807ed7208fccb..7efff490d8c1b6 100644 --- a/src/transformers/models/videomae/modeling_videomae.py +++ b/src/transformers/models/videomae/modeling_videomae.py @@ -598,21 +598,18 @@ def forward( >>> file_path = hf_hub_download( ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset" ... ) - >>> vr = VideoReader(file_path, num_threads=1, ctx=cpu(0)) + >>> videoreader = VideoReader(file_path, num_threads=1, ctx=cpu(0)) >>> # sample 16 frames - >>> vr.seek(0) - >>> indices = sample_frame_indices(clip_len=16, frame_sample_rate=4, seg_len=len(vr)) - >>> buffer = vr.get_batch(indices).asnumpy() - - >>> # create a list of NumPy arrays - >>> video = [buffer[i] for i in range(buffer.shape[0])] + >>> videoreader.seek(0) + >>> indices = sample_frame_indices(clip_len=16, frame_sample_rate=4, seg_len=len(videoreader)) + >>> video = videoreader.get_batch(indices).asnumpy() >>> feature_extractor = VideoMAEFeatureExtractor.from_pretrained("MCG-NJU/videomae-base") >>> model = VideoMAEModel.from_pretrained("MCG-NJU/videomae-base") >>> # prepare video for the model - >>> inputs = feature_extractor(video, return_tensors="pt") + >>> inputs = feature_extractor(list(video), return_tensors="pt") >>> # forward pass >>> outputs = model(**inputs) @@ -943,10 +940,13 @@ def forward( ```python >>> from decord import VideoReader, cpu >>> import torch + >>> import numpy as np >>> from transformers import VideoMAEFeatureExtractor, VideoMAEForVideoClassification >>> from huggingface_hub import hf_hub_download + >>> np.random.seed(0) + >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len): ... converted_len = int(clip_len * frame_sample_rate) @@ -961,20 +961,17 @@ def forward( >>> file_path = hf_hub_download( ... repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset" ... ) - >>> vr = VideoReader(file_path, num_threads=1, ctx=cpu(0)) + >>> videoreader = VideoReader(file_path, num_threads=1, ctx=cpu(0)) >>> # sample 16 frames - >>> vr.seek(0) - >>> indices = sample_frame_indices(clip_len=16, frame_sample_rate=4, seg_len=len(vr)) - >>> buffer = vr.get_batch(indices).asnumpy() - - >>> # create a list of NumPy arrays - >>> video = [buffer[i] for i in range(buffer.shape[0])] + >>> videoreader.seek(0) + >>> indices = sample_frame_indices(clip_len=16, frame_sample_rate=4, seg_len=len(videoreader)) + >>> video = videoreader.get_batch(indices).asnumpy() >>> feature_extractor = VideoMAEFeatureExtractor.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics") >>> model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics") - >>> inputs = feature_extractor(video, return_tensors="pt") + >>> inputs = feature_extractor(list(video), return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) From 7a8118947f3c6a802a9f63dc22c394961d38860f Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Wed, 7 Sep 2022 12:51:37 +0200 Subject: [PATCH 230/539] Add checks for more workflow jobs (#18905) * add check for scheduled CI * Add check to other CIs Co-authored-by: ydshieh --- .github/workflows/self-nightly-scheduled.yml | 32 +++++++++++-- .github/workflows/self-past.yml | 30 +++++++++++-- .github/workflows/self-scheduled.yml | 47 +++++++++++++++++--- 3 files changed, 95 insertions(+), 14 deletions(-) diff --git a/.github/workflows/self-nightly-scheduled.yml b/.github/workflows/self-nightly-scheduled.yml index f1822be911c34b..2b3283abf3b215 100644 --- a/.github/workflows/self-nightly-scheduled.yml +++ b/.github/workflows/self-nightly-scheduled.yml @@ -23,8 +23,23 @@ env: RUN_PT_TF_CROSS_TESTS: 1 jobs: + run_check_runners: + name: Check Runners + strategy: + matrix: + machine_type: [single-gpu, multi-gpu] + runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker') }} + container: + image: huggingface/transformers-all-latest-torch-nightly-gpu + options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ + steps: + - name: NVIDIA-SMI + run: | + nvidia-smi + setup: name: Setup + needs: run_check_runners strategy: matrix: machine_type: [single-gpu, multi-gpu] @@ -68,7 +83,7 @@ jobs: container: image: huggingface/transformers-all-latest-torch-nightly-gpu options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - needs: setup + needs: [run_check_runners, setup] steps: - name: Echo folder ${{ matrix.folders }} shell: bash @@ -121,7 +136,7 @@ jobs: container: image: huggingface/transformers-all-latest-torch-nightly-gpu options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - needs: setup + needs: [run_check_runners, setup] steps: - name: Echo folder ${{ matrix.folders }} shell: bash @@ -170,7 +185,7 @@ jobs: matrix: machine_type: [single-gpu, multi-gpu] runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker') }} - needs: setup + needs: [run_check_runners, setup] container: image: huggingface/transformers-pytorch-deepspeed-nightly-gpu options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ @@ -221,8 +236,15 @@ jobs: name: Send results to webhook runs-on: ubuntu-latest if: always() - needs: [setup, run_tests_single_gpu, run_tests_multi_gpu, run_all_tests_torch_cuda_extensions_gpu] + needs: [run_check_runners, setup, run_tests_single_gpu, run_tests_multi_gpu, run_all_tests_torch_cuda_extensions_gpu] steps: + - name: Preliminary job status + shell: bash + # For the meaning of these environment variables, see the job `Setup` + run: | + echo "Runner status: ${{ needs.run_check_runners.result }}" + echo "Setup status: ${{ needs.setup.result }}" + - uses: actions/checkout@v2 - uses: actions/download-artifact@v2 - name: Send message to Slack @@ -233,6 +255,8 @@ jobs: CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }} CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_PAST_FUTURE }} CI_EVENT: nightly-build + SETUP_STATUS: ${{ needs.setup.result }} + RUNNER_STATUS: ${{ needs.run_check_runners.result }} # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`. run: | diff --git a/.github/workflows/self-past.yml b/.github/workflows/self-past.yml index b3871dc92fa47f..8e9130023bb36c 100644 --- a/.github/workflows/self-past.yml +++ b/.github/workflows/self-past.yml @@ -50,6 +50,21 @@ jobs: cd tests echo "::set-output name=matrix::$(python3 -c 'import os; tests = os.getcwd(); model_tests = os.listdir(os.path.join(tests, "models")); d1 = sorted(list(filter(os.path.isdir, os.listdir(tests)))); d2 = sorted(list(filter(os.path.isdir, [f"models/{x}" for x in model_tests]))); d1.remove("models"); d = d2 + d1; print(d)')" + run_check_runners: + name: Check Runners + needs: setup + strategy: + matrix: + machine_type: [single-gpu, multi-gpu] + runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker-past-ci') }} + container: + image: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu + options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ + steps: + - name: NVIDIA-SMI + run: | + nvidia-smi + run_tests_single_gpu: name: Model tests strategy: @@ -61,7 +76,7 @@ jobs: container: image: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - needs: setup + needs: [setup, run_check_runners] steps: - name: Update clone working-directory: /transformers @@ -114,7 +129,7 @@ jobs: container: image: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - needs: setup + needs: [setup, run_check_runners] steps: - name: Update clone working-directory: /transformers @@ -160,8 +175,15 @@ jobs: name: Send results to webhook runs-on: ubuntu-latest if: always() - needs: [setup, run_tests_single_gpu, run_tests_multi_gpu] + needs: [setup, run_check_runners, run_tests_single_gpu, run_tests_multi_gpu] steps: + - name: Preliminary job status + shell: bash + # For the meaning of these environment variables, see the job `Setup` + run: | + echo "Runner status: ${{ needs.run_check_runners.result }}" + echo "Setup status: ${{ needs.setup.result }}" + - uses: actions/checkout@v2 - uses: actions/download-artifact@v2 @@ -177,6 +199,8 @@ jobs: CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }} CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_PAST_FUTURE }} CI_EVENT: Past CI - ${{ inputs.framework }}-${{ inputs.version }} + SETUP_STATUS: ${{ needs.setup.result }} + RUNNER_STATUS: ${{ needs.run_check_runners.result }} # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`. run: | diff --git a/.github/workflows/self-scheduled.yml b/.github/workflows/self-scheduled.yml index 82d33babf4ef32..a0a10921ae825b 100644 --- a/.github/workflows/self-scheduled.yml +++ b/.github/workflows/self-scheduled.yml @@ -22,8 +22,23 @@ env: RUN_PT_TF_CROSS_TESTS: 1 jobs: + run_check_runners: + name: Check Runners + strategy: + matrix: + machine_type: [single-gpu, multi-gpu] + runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker') }} + container: + image: huggingface/transformers-all-latest-gpu + options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ + steps: + - name: NVIDIA-SMI + run: | + nvidia-smi + setup: name: Setup + needs: run_check_runners strategy: matrix: machine_type: [single-gpu, multi-gpu] @@ -67,7 +82,7 @@ jobs: container: image: huggingface/transformers-all-latest-gpu options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - needs: setup + needs: [run_check_runners, setup] steps: - name: Echo folder ${{ matrix.folders }} shell: bash @@ -120,7 +135,7 @@ jobs: container: image: huggingface/transformers-all-latest-gpu options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - needs: setup + needs: [run_check_runners, setup] steps: - name: Echo folder ${{ matrix.folders }} shell: bash @@ -168,7 +183,7 @@ jobs: container: image: huggingface/transformers-all-latest-gpu options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - needs: setup + needs: [run_check_runners, setup] steps: - name: Update clone working-directory: /transformers @@ -211,7 +226,7 @@ jobs: container: image: huggingface/transformers-pytorch-gpu options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - needs: setup + needs: [run_check_runners, setup] steps: - name: Update clone working-directory: /transformers @@ -255,7 +270,7 @@ jobs: container: image: huggingface/transformers-tensorflow-gpu options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - needs: setup + needs: [run_check_runners, setup] steps: - name: Update clone working-directory: /transformers @@ -297,7 +312,7 @@ jobs: matrix: machine_type: [single-gpu, multi-gpu] runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker') }} - needs: setup + needs: [run_check_runners, setup] container: image: huggingface/transformers-pytorch-deepspeed-latest-gpu options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ @@ -346,8 +361,24 @@ jobs: name: Send results to webhook runs-on: ubuntu-latest if: always() - needs: [setup, run_tests_single_gpu, run_tests_multi_gpu, run_examples_gpu, run_pipelines_tf_gpu, run_pipelines_torch_gpu, run_all_tests_torch_cuda_extensions_gpu] + needs: [ + run_check_runners, + setup, + run_tests_single_gpu, + run_tests_multi_gpu, + run_examples_gpu, + run_pipelines_tf_gpu, + run_pipelines_torch_gpu, + run_all_tests_torch_cuda_extensions_gpu + ] steps: + - name: Preliminary job status + shell: bash + # For the meaning of these environment variables, see the job `Setup` + run: | + echo "Runner status: ${{ needs.run_check_runners.result }}" + echo "Setup status: ${{ needs.setup.result }}" + - uses: actions/checkout@v2 - uses: actions/download-artifact@v2 - name: Send message to Slack @@ -358,6 +389,8 @@ jobs: CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }} CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY }} CI_EVENT: scheduled + SETUP_STATUS: ${{ needs.setup.result }} + RUNNER_STATUS: ${{ needs.run_check_runners.result }} # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`. run: | From 4f299b2446759d17296550e53dd66ea3ab0b3359 Mon Sep 17 00:00:00 2001 From: Nicholas Broad Date: Wed, 7 Sep 2022 07:46:26 -0400 Subject: [PATCH 231/539] Accelerator end training (#18910) * add accelerator.end_training() Some trackers need this to end their runs. * fixup and quality * add space * add space again ?!? --- .../run_image_classification_no_trainer.py | 3 +++ examples/pytorch/language-modeling/run_clm_no_trainer.py | 3 +++ examples/pytorch/language-modeling/run_mlm_no_trainer.py | 3 +++ examples/pytorch/multiple-choice/run_swag_no_trainer.py | 3 +++ .../run_semantic_segmentation_no_trainer.py | 3 +++ examples/pytorch/text-classification/run_glue_no_trainer.py | 3 +++ examples/pytorch/token-classification/run_ner_no_trainer.py | 3 +++ examples/pytorch/translation/run_translation_no_trainer.py | 3 +++ 8 files changed, 24 insertions(+) diff --git a/examples/pytorch/image-classification/run_image_classification_no_trainer.py b/examples/pytorch/image-classification/run_image_classification_no_trainer.py index 7037ab6c82bff5..b6e27de0dea057 100644 --- a/examples/pytorch/image-classification/run_image_classification_no_trainer.py +++ b/examples/pytorch/image-classification/run_image_classification_no_trainer.py @@ -553,6 +553,9 @@ def collate_fn(examples): output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) + if args.with_tracking: + accelerator.end_training() + if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) diff --git a/examples/pytorch/language-modeling/run_clm_no_trainer.py b/examples/pytorch/language-modeling/run_clm_no_trainer.py index dee0fee8a070e7..c0fcbbd3ce5857 100755 --- a/examples/pytorch/language-modeling/run_clm_no_trainer.py +++ b/examples/pytorch/language-modeling/run_clm_no_trainer.py @@ -648,6 +648,9 @@ def group_texts(examples): output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) + if args.with_tracking: + accelerator.end_training() + if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) diff --git a/examples/pytorch/language-modeling/run_mlm_no_trainer.py b/examples/pytorch/language-modeling/run_mlm_no_trainer.py index 9dd519d11e3d10..2a1951f83de217 100755 --- a/examples/pytorch/language-modeling/run_mlm_no_trainer.py +++ b/examples/pytorch/language-modeling/run_mlm_no_trainer.py @@ -693,6 +693,9 @@ def group_texts(examples): output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) + if args.with_tracking: + accelerator.end_training() + if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) diff --git a/examples/pytorch/multiple-choice/run_swag_no_trainer.py b/examples/pytorch/multiple-choice/run_swag_no_trainer.py index aed2ad8aa99ac1..43dee8bfdb305f 100755 --- a/examples/pytorch/multiple-choice/run_swag_no_trainer.py +++ b/examples/pytorch/multiple-choice/run_swag_no_trainer.py @@ -637,6 +637,9 @@ def preprocess_function(examples): output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) + if args.with_tracking: + accelerator.end_training() + if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) diff --git a/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py b/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py index dc1dba2f233b8b..cd30a30daa8b41 100644 --- a/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py +++ b/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py @@ -662,6 +662,9 @@ def preprocess_val(example_batch): output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) + if args.with_tracking: + accelerator.end_training() + if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) diff --git a/examples/pytorch/text-classification/run_glue_no_trainer.py b/examples/pytorch/text-classification/run_glue_no_trainer.py index 3720c9d09bb451..fadb0148313e33 100644 --- a/examples/pytorch/text-classification/run_glue_no_trainer.py +++ b/examples/pytorch/text-classification/run_glue_no_trainer.py @@ -590,6 +590,9 @@ def preprocess_function(examples): output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) + if args.with_tracking: + accelerator.end_training() + if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) diff --git a/examples/pytorch/token-classification/run_ner_no_trainer.py b/examples/pytorch/token-classification/run_ner_no_trainer.py index 72b9ed3af4b833..4aee8c7ebacbfd 100755 --- a/examples/pytorch/token-classification/run_ner_no_trainer.py +++ b/examples/pytorch/token-classification/run_ner_no_trainer.py @@ -746,6 +746,9 @@ def compute_metrics(): output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) + if args.with_tracking: + accelerator.end_training() + if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) diff --git a/examples/pytorch/translation/run_translation_no_trainer.py b/examples/pytorch/translation/run_translation_no_trainer.py index d16c7d5f1c8fd5..92bc97e355f884 100644 --- a/examples/pytorch/translation/run_translation_no_trainer.py +++ b/examples/pytorch/translation/run_translation_no_trainer.py @@ -728,6 +728,9 @@ def postprocess_text(preds, labels): output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) + if args.with_tracking: + accelerator.end_training() + if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) From d842f2d5b9bd4e361644c332bf9dc7f9b064f581 Mon Sep 17 00:00:00 2001 From: "Wang, Yi" Date: Wed, 7 Sep 2022 20:01:30 +0800 Subject: [PATCH 232/539] update the train_batch_size in case HPO change batch_size_per_device (#18918) Signed-off-by: Wang, Yi A Signed-off-by: Wang, Yi A --- src/transformers/trainer.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index ae1b2458524817..f7c3836d4afd42 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -1488,6 +1488,7 @@ def train( raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.") # This might change the seed so needs to run first. self._hp_search_setup(trial) + self._train_batch_size = self.args.train_batch_size # Model re-init model_reloaded = False From 2b9513fdabbcfd3ca5d7003a955be633a2f365fc Mon Sep 17 00:00:00 2001 From: Matt Date: Wed, 7 Sep 2022 13:30:07 +0100 Subject: [PATCH 233/539] Update TF fine-tuning docs (#18654) * Update TF fine-tuning docs * Fix formatting * Add some section headers so the right sidebar works better * Squiggly it * Update docs/source/en/training.mdx Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update docs/source/en/training.mdx Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update docs/source/en/training.mdx Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update docs/source/en/training.mdx Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update docs/source/en/training.mdx Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update docs/source/en/training.mdx Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update docs/source/en/training.mdx Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update docs/source/en/training.mdx Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update docs/source/en/training.mdx Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update docs/source/en/training.mdx Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update docs/source/en/training.mdx Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update docs/source/en/training.mdx Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> * Update docs/source/en/training.mdx Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Explain things in the text, not the comments * Make the two dataset creation methods into a list * Move the advice about collation out of a * Edits for clarity * Edits for clarity * Edits for clarity * Replace `to_tf_dataset` with `prepare_tf_dataset` in the fine-tuning pages * Restructure the page a little bit * Restructure the page a little bit * Restructure the page a little bit Co-authored-by: Steven Liu <59462357+stevhliu@users.noreply.github.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> --- docs/source/en/tasks/language_modeling.mdx | 24 ++-- docs/source/en/tasks/multiple_choice.mdx | 17 +-- docs/source/en/tasks/question_answering.mdx | 12 +- .../en/tasks/sequence_classification.mdx | 11 +- docs/source/en/tasks/summarization.mdx | 10 +- docs/source/en/tasks/token_classification.mdx | 10 +- docs/source/en/tasks/translation.mdx | 12 +- docs/source/en/training.mdx | 121 +++++++++++++----- 8 files changed, 130 insertions(+), 87 deletions(-) diff --git a/docs/source/en/tasks/language_modeling.mdx b/docs/source/en/tasks/language_modeling.mdx index f410bd5a557291..82708f2f899a9a 100644 --- a/docs/source/en/tasks/language_modeling.mdx +++ b/docs/source/en/tasks/language_modeling.mdx @@ -245,20 +245,18 @@ At this point, only three steps remain: ``` -To fine-tune a model in TensorFlow, start by converting your datasets to the `tf.data.Dataset` format with [`~datasets.Dataset.to_tf_dataset`]. Specify inputs and labels in `columns`, whether to shuffle the dataset order, batch size, and the data collator: +To fine-tune a model in TensorFlow, start by converting your datasets to the `tf.data.Dataset` format with [`~TFPreTrainedModel.prepare_tf_dataset`]. ```py ->>> tf_train_set = lm_dataset["train"].to_tf_dataset( -... columns=["attention_mask", "input_ids", "labels"], -... dummy_labels=True, +>>> tf_train_set = model.prepare_tf_dataset( +... lm_dataset["train"], ... shuffle=True, ... batch_size=16, ... collate_fn=data_collator, ... ) ->>> tf_test_set = lm_dataset["test"].to_tf_dataset( -... columns=["attention_mask", "input_ids", "labels"], -... dummy_labels=True, +>>> tf_test_set = model.prepare_tf_dataset( +... lm_dataset["test"], ... shuffle=False, ... batch_size=16, ... collate_fn=data_collator, @@ -352,20 +350,18 @@ At this point, only three steps remain: ``` -To fine-tune a model in TensorFlow, start by converting your datasets to the `tf.data.Dataset` format with [`~datasets.Dataset.to_tf_dataset`]. Specify inputs and labels in `columns`, whether to shuffle the dataset order, batch size, and the data collator: +To fine-tune a model in TensorFlow, start by converting your datasets to the `tf.data.Dataset` format with [`~TFPreTrainedModel.prepare_tf_dataset`]. ```py ->>> tf_train_set = lm_dataset["train"].to_tf_dataset( -... columns=["attention_mask", "input_ids", "labels"], -... dummy_labels=True, +>>> tf_train_set = model.prepare_tf_dataset( +... lm_dataset["train"], ... shuffle=True, ... batch_size=16, ... collate_fn=data_collator, ... ) ->>> tf_test_set = lm_dataset["test"].to_tf_dataset( -... columns=["attention_mask", "input_ids", "labels"], -... dummy_labels=True, +>>> tf_test_set = model.prepare_tf_dataset( +... lm_dataset["test"], ... shuffle=False, ... batch_size=16, ... collate_fn=data_collator, diff --git a/docs/source/en/tasks/multiple_choice.mdx b/docs/source/en/tasks/multiple_choice.mdx index b8eb528497036d..6ee0d7137f9ce8 100644 --- a/docs/source/en/tasks/multiple_choice.mdx +++ b/docs/source/en/tasks/multiple_choice.mdx @@ -224,21 +224,19 @@ At this point, only three steps remain: ``` -To fine-tune a model in TensorFlow, start by converting your datasets to the `tf.data.Dataset` format with [`~datasets.Dataset.to_tf_dataset`]. Specify inputs in `columns`, targets in `label_cols`, whether to shuffle the dataset order, batch size, and the data collator: +To fine-tune a model in TensorFlow, start by converting your datasets to the `tf.data.Dataset` format with [`~TFPreTrainedModel.prepare_tf_dataset`]. ```py >>> data_collator = DataCollatorForMultipleChoice(tokenizer=tokenizer) ->>> tf_train_set = tokenized_swag["train"].to_tf_dataset( -... columns=["attention_mask", "input_ids"], -... label_cols=["labels"], +>>> tf_train_set = model.prepare_tf_dataset( +... tokenized_swag["train"], ... shuffle=True, ... batch_size=batch_size, ... collate_fn=data_collator, ... ) ->>> tf_validation_set = tokenized_swag["validation"].to_tf_dataset( -... columns=["attention_mask", "input_ids"], -... label_cols=["labels"], +>>> tf_validation_set = model.prepare_tf_dataset( +... tokenized_swag["validation"], ... shuffle=False, ... batch_size=batch_size, ... collate_fn=data_collator, @@ -273,10 +271,7 @@ Load BERT with [`TFAutoModelForMultipleChoice`]: Configure the model for training with [`compile`](https://keras.io/api/models/model_training_apis/#compile-method): ```py ->>> model.compile( -... optimizer=optimizer, -... loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), -... ) +>>> model.compile(optimizer=optimizer) ``` Call [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) to fine-tune the model: diff --git a/docs/source/en/tasks/question_answering.mdx b/docs/source/en/tasks/question_answering.mdx index 2cb54760e8796e..218fa7bb5520ef 100644 --- a/docs/source/en/tasks/question_answering.mdx +++ b/docs/source/en/tasks/question_answering.mdx @@ -199,20 +199,18 @@ At this point, only three steps remain: ``` -To fine-tune a model in TensorFlow, start by converting your datasets to the `tf.data.Dataset` format with [`~datasets.Dataset.to_tf_dataset`]. Specify inputs and the start and end positions of an answer in `columns`, whether to shuffle the dataset order, batch size, and the data collator: +To fine-tune a model in TensorFlow, start by converting your datasets to the `tf.data.Dataset` format with [`~TFPreTrainedModel.prepare_tf_dataset`]. ```py ->>> tf_train_set = tokenized_squad["train"].to_tf_dataset( -... columns=["attention_mask", "input_ids", "start_positions", "end_positions"], -... dummy_labels=True, +>>> tf_train_set = model.prepare_tf_dataset( +... tokenized_squad["train"], ... shuffle=True, ... batch_size=16, ... collate_fn=data_collator, ... ) ->>> tf_validation_set = tokenized_squad["validation"].to_tf_dataset( -... columns=["attention_mask", "input_ids", "start_positions", "end_positions"], -... dummy_labels=True, +>>> tf_validation_set = model.prepare_tf_dataset( +... tokenized_squad["validation"], ... shuffle=False, ... batch_size=16, ... collate_fn=data_collator, diff --git a/docs/source/en/tasks/sequence_classification.mdx b/docs/source/en/tasks/sequence_classification.mdx index 44729dc28f4e85..2ef8a9619ce55f 100644 --- a/docs/source/en/tasks/sequence_classification.mdx +++ b/docs/source/en/tasks/sequence_classification.mdx @@ -144,18 +144,19 @@ At this point, only three steps remain: -To fine-tune a model in TensorFlow, start by converting your datasets to the `tf.data.Dataset` format with [`~datasets.Dataset.to_tf_dataset`]. Specify inputs and labels in `columns`, whether to shuffle the dataset order, batch size, and the data collator: +To fine-tune a model in TensorFlow, start by converting your datasets to the `tf.data.Dataset` format with [`~TFPreTrainedModel.prepare_tf_dataset`]. + ```py ->>> tf_train_set = tokenized_imdb["train"].to_tf_dataset( -... columns=["attention_mask", "input_ids", "label"], +>>> tf_train_set = model.prepare_tf_dataset( +... tokenized_imdb["train"], ... shuffle=True, ... batch_size=16, ... collate_fn=data_collator, ... ) ->>> tf_validation_set = tokenized_imdb["test"].to_tf_dataset( -... columns=["attention_mask", "input_ids", "label"], +>>> tf_validation_set = model.prepare_tf_dataset( +... tokenized_imdb["test"], ... shuffle=False, ... batch_size=16, ... collate_fn=data_collator, diff --git a/docs/source/en/tasks/summarization.mdx b/docs/source/en/tasks/summarization.mdx index f636141a15076d..1b2eafcb5f9f1d 100644 --- a/docs/source/en/tasks/summarization.mdx +++ b/docs/source/en/tasks/summarization.mdx @@ -159,18 +159,18 @@ At this point, only three steps remain: ``` -To fine-tune a model in TensorFlow, start by converting your datasets to the `tf.data.Dataset` format with [`~datasets.Dataset.to_tf_dataset`]. Specify inputs and labels in `columns`, whether to shuffle the dataset order, batch size, and the data collator: +To fine-tune a model in TensorFlow, start by converting your datasets to the `tf.data.Dataset` format with [`~TFPreTrainedModel.prepare_tf_dataset`]. ```py ->>> tf_train_set = tokenized_billsum["train"].to_tf_dataset( -... columns=["attention_mask", "input_ids", "labels"], +>>> tf_train_set = model.prepare_tf_dataset( +... tokenized_billsum["train"], ... shuffle=True, ... batch_size=16, ... collate_fn=data_collator, ... ) ->>> tf_test_set = tokenized_billsum["test"].to_tf_dataset( -... columns=["attention_mask", "input_ids", "labels"], +>>> tf_test_set = model.prepare_tf_dataset( +... tokenized_billsum["test"], ... shuffle=False, ... batch_size=16, ... collate_fn=data_collator, diff --git a/docs/source/en/tasks/token_classification.mdx b/docs/source/en/tasks/token_classification.mdx index aa5739534f9fb8..3d2a3ccb05cc30 100644 --- a/docs/source/en/tasks/token_classification.mdx +++ b/docs/source/en/tasks/token_classification.mdx @@ -199,18 +199,18 @@ At this point, only three steps remain: ``` -To fine-tune a model in TensorFlow, start by converting your datasets to the `tf.data.Dataset` format with [`~datasets.Dataset.to_tf_dataset`]. Specify inputs and labels in `columns`, whether to shuffle the dataset order, batch size, and the data collator: +To fine-tune a model in TensorFlow, start by converting your datasets to the `tf.data.Dataset` format with [`~TFPreTrainedModel.prepare_tf_dataset`]. ```py ->>> tf_train_set = tokenized_wnut["train"].to_tf_dataset( -... columns=["attention_mask", "input_ids", "labels"], +>>> tf_train_set = model.prepare_tf_dataset( +... tokenized_wnut["train"], ... shuffle=True, ... batch_size=16, ... collate_fn=data_collator, ... ) ->>> tf_validation_set = tokenized_wnut["validation"].to_tf_dataset( -... columns=["attention_mask", "input_ids", "labels"], +>>> tf_validation_set = model.prepare_tf_dataset( +... tokenized_wnut["validation"], ... shuffle=False, ... batch_size=16, ... collate_fn=data_collator, diff --git a/docs/source/en/tasks/translation.mdx b/docs/source/en/tasks/translation.mdx index d17b8704141861..7439bc7b61c40b 100644 --- a/docs/source/en/tasks/translation.mdx +++ b/docs/source/en/tasks/translation.mdx @@ -175,18 +175,18 @@ At this point, only three steps remain: ``` -To fine-tune a model in TensorFlow, start by converting your datasets to the `tf.data.Dataset` format with [`~datasets.Dataset.to_tf_dataset`]. Specify inputs and labels in `columns`, whether to shuffle the dataset order, batch size, and the data collator: +To fine-tune a model in TensorFlow, start by converting your datasets to the `tf.data.Dataset` format with [`~TFPreTrainedModel.prepare_tf_dataset`]. ```py ->>> tf_train_set = tokenized_books["train"].to_tf_dataset( -... columns=["attention_mask", "input_ids", "labels"], +>>> tf_train_set = model.prepare_tf_dataset( +... tokenized_books["train"], ... shuffle=True, ... batch_size=16, ... collate_fn=data_collator, ... ) ->>> tf_test_set = tokenized_books["test"].to_tf_dataset( -... columns=["attention_mask", "input_ids", "labels"], +>>> tf_test_set = model.prepare_tf_dataset( +... tokenized_books["test"], ... shuffle=False, ... batch_size=16, ... collate_fn=data_collator, @@ -216,7 +216,7 @@ Configure the model for training with [`compile`](https://keras.io/api/models/mo Call [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) to fine-tune the model: ```py ->>> model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3) +>>> model.fit(tf_train_set, validation_data=tf_test_set, epochs=3) ``` diff --git a/docs/source/en/training.mdx b/docs/source/en/training.mdx index 9222d27ac81f6e..89f5c3148b54a6 100644 --- a/docs/source/en/training.mdx +++ b/docs/source/en/training.mdx @@ -65,10 +65,16 @@ If you like, you can create a smaller subset of the full dataset to fine-tune on ## Train +At this point, you should follow the section corresponding to the framework you want to use. You can use the links +in the right sidebar to jump to the one you want - and if you want to hide all of the content for a given framework, +just use the button at the top-right of that framework's block! + +## Train with PyTorch Trainer + 🤗 Transformers provides a [`Trainer`] class optimized for training 🤗 Transformers models, making it easier to start training without manually writing your own training loop. The [`Trainer`] API supports a wide range of training options and features such as logging, gradient accumulation, and mixed precision. Start by loading your model and specify the number of expected labels. From the Yelp Review [dataset card](https://huggingface.co/datasets/yelp_review_full#data-fields), you know there are five labels: @@ -151,66 +157,113 @@ Then fine-tune your model by calling [`~transformers.Trainer.train`]: -🤗 Transformers models also supports training in TensorFlow with the Keras API. +## Train a TensorFlow model with Keras + +You can also train 🤗 Transformers models in TensorFlow with the Keras API! + +### Loading data for Keras + +When you want to train a 🤗 Transformers model with the Keras API, you need to convert your dataset to a format that +Keras understands. If your dataset is small, you can just convert the whole thing to NumPy arrays and pass it to Keras. +Let's try that first before we do anything more complicated. + +First, load a dataset. We'll use the CoLA dataset from the [GLUE benchmark](https://huggingface.co/datasets/glue), +since it's a simple binary text classification task, and just take the training split for now. + +```py +from datasets import load_dataset -### Convert dataset to TensorFlow format +dataset = load_dataset("glue", "cola") +dataset = dataset["train"] # Just take the training split for now +``` -The [`DefaultDataCollator`] assembles tensors into a batch for the model to train on. Make sure you specify `return_tensors` to return TensorFlow tensors: +Next, load a tokenizer and tokenize the data as NumPy arrays. Note that the labels are already a list of 0 and 1s, +so we can just convert that directly to a NumPy array without tokenization! ```py ->>> from transformers import DefaultDataCollator +from transformers import AutoTokenizer ->>> data_collator = DefaultDataCollator(return_tensors="tf") +tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") +tokenized_data = tokenizer(dataset["text"], return_tensors="np", padding=True) + +labels = np.array(dataset["label"]) # Label is already an array of 0 and 1 +``` + +Finally, load, [`compile`](https://keras.io/api/models/model_training_apis/#compile-method), and [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) the model: + +```py +from transformers import TFAutoModelForSequenceClassification +from tensorflow.keras.optimizers import Adam + +# Load and compile our model +model = TFAutoModelForSequenceClassification.from_pretrained("bert-base-cased") +# Lower learning rates are often better for fine-tuning transformers +model.compile(optimizer=Adam(3e-5)) + +model.fit(tokenized_data, labels) ``` -[`Trainer`] uses [`DataCollatorWithPadding`] by default so you don't need to explicitly specify a data collator. +You don't have to pass a loss argument to your models when you `compile()` them! Hugging Face models automatically +choose a loss that is appropriate for their task and model architecture if this argument is left blank. You can always +override this by specifying a loss yourself if you want to! -Next, convert the tokenized datasets to TensorFlow datasets with the [`~datasets.Dataset.to_tf_dataset`] method. Specify your inputs in `columns`, and your label in `label_cols`: +This approach works great for smaller datasets, but for larger datasets, you might find it starts to become a problem. Why? +Because the tokenized array and labels would have to be fully loaded into memory, and because NumPy doesn’t handle +“jagged” arrays, so every tokenized sample would have to be padded to the length of the longest sample in the whole +dataset. That’s going to make your array even bigger, and all those padding tokens will slow down training too! + +### Loading data as a tf.data.Dataset + +If you want to avoid slowing down training, you can load your data as a `tf.data.Dataset` instead. Although you can write your own +`tf.data` pipeline if you want, we have two convenience methods for doing this: + +- [`~TFPreTrainedModel.prepare_tf_dataset`]: This is the method we recommend in most cases. Because it is a method +on your model, it can inspect the model to automatically figure out which columns are usable as model inputs, and +discard the others to make a simpler, more performant dataset. +- [`~datasets.Dataset.to_tf_dataset`]: This method is more low-level, and is useful when you want to exactly control how +your dataset is created, by specifying exactly which `columns` and `label_cols` to include. + +Before you can use [`~TFPreTrainedModel.prepare_tf_dataset`], you will need to add the tokenizer outputs to your dataset as columns, as shown in +the following code sample: ```py ->>> tf_train_dataset = small_train_dataset.to_tf_dataset( -... columns=["attention_mask", "input_ids", "token_type_ids"], -... label_cols=["labels"], -... shuffle=True, -... collate_fn=data_collator, -... batch_size=8, -... ) +def tokenize_dataset(data): + # Keys of the returned dictionary will be added to the dataset as columns + return tokenizer(data["text"]) ->>> tf_validation_dataset = small_eval_dataset.to_tf_dataset( -... columns=["attention_mask", "input_ids", "token_type_ids"], -... label_cols=["labels"], -... shuffle=False, -... collate_fn=data_collator, -... batch_size=8, -... ) + +dataset = dataset.map(tokenize_dataset) ``` -### Compile and fit +Remember that Hugging Face datasets are stored on disk by default, so this will not inflate your memory usage! Once the +columns have been added, you can stream batches from the dataset and add padding to each batch, which greatly +reduces the number of padding tokens compared to padding the entire dataset. -Load a TensorFlow model with the expected number of labels: ```py ->>> import tensorflow as tf ->>> from transformers import TFAutoModelForSequenceClassification - ->>> model = TFAutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=5) +>>> tf_dataset = model.prepare_tf_dataset(dataset, batch_size=16, shuffle=True, tokenizer=tokenizer) ``` -Then compile and fine-tune your model with [`fit`](https://keras.io/api/models/model_training_apis/) as you would with any other Keras model: +Note that in the code sample above, you need to pass the tokenizer to `prepare_tf_dataset` so it can correctly pad batches as they're loaded. +If all the samples in your dataset are the same length and no padding is necessary, you can skip this argument. +If you need to do something more complex than just padding samples (e.g. corrupting tokens for masked language +modelling), you can use the `collate_fn` argument instead to pass a function that will be called to transform the +list of samples into a batch and apply any preprocessing you want. See our +[examples](https://github.com/huggingface/transformers/tree/main/examples) or +[notebooks](https://huggingface.co/docs/transformers/notebooks) to see this approach in action. + +Once you've created a `tf.data.Dataset`, you can compile and fit the model as before: ```py ->>> model.compile( -... optimizer=tf.keras.optimizers.Adam(learning_rate=5e-5), -... loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), -... metrics=tf.metrics.SparseCategoricalAccuracy(), -... ) +model.compile(optimizer=Adam(3e-5)) ->>> model.fit(tf_train_dataset, validation_data=tf_validation_dataset, epochs=3) +model.fit(tf_dataset) ``` + From 0eabab0998e1ed2992744ab8391cd3d82addbadf Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Wed, 7 Sep 2022 14:03:02 +0100 Subject: [PATCH 234/539] TF: final bias as a layer in seq2seq models (replicate TFMarian fix) (#18903) --- .../models/bart/modeling_tf_bart.py | 22 ++++++++++++++++-- .../blenderbot/modeling_tf_blenderbot.py | 23 +++++++++++++++++-- .../modeling_tf_blenderbot_small.py | 23 +++++++++++++++++-- .../models/led/modeling_tf_led.py | 23 +++++++++++++++++-- .../models/marian/modeling_tf_marian.py | 1 + .../models/mbart/modeling_tf_mbart.py | 23 +++++++++++++++++-- .../models/pegasus/modeling_tf_pegasus.py | 23 +++++++++++++++++-- ...tf_{{cookiecutter.lowercase_modelname}}.py | 23 +++++++++++++++++-- 8 files changed, 147 insertions(+), 14 deletions(-) diff --git a/src/transformers/models/bart/modeling_tf_bart.py b/src/transformers/models/bart/modeling_tf_bart.py index 49d2e4d9be0603..c15d0ae50451ae 100644 --- a/src/transformers/models/bart/modeling_tf_bart.py +++ b/src/transformers/models/bart/modeling_tf_bart.py @@ -1251,6 +1251,23 @@ def serving_output(self, output): ) +class BiasLayer(tf.keras.layers.Layer): + """ + Bias as a layer. It is used for serialization purposes: `tf.keras.Model.save_weights` stores on a per-layer basis, + so all weights have to be registered in a layer. + """ + + def __init__(self, shape, initializer, trainable, name, **kwargs): + super().__init__(name=name, **kwargs) + # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of + # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see: + # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214 + self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable) + + def call(self, x): + return x + self.bias + + @add_start_docstrings( "The BART Model with a language modeling head. Can be used for summarization.", BART_START_DOCSTRING, @@ -1268,9 +1285,10 @@ def __init__(self, config, load_weight_prefix=None, *inputs, **kwargs): self.model = TFBartMainLayer(config, load_weight_prefix=load_weight_prefix, name="model") self.use_cache = config.use_cache # final_bias_logits is registered as a buffer in pytorch, so not trainable for the sake of consistency. - self.final_logits_bias = self.add_weight( + self.bias_layer = BiasLayer( name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False ) + self.final_logits_bias = self.bias_layer.bias # alias to keep the same interface with PT def get_decoder(self): return self.model.decoder @@ -1357,7 +1375,7 @@ def call( training=training, ) lm_logits = self.model.shared(outputs[0], mode="linear") - lm_logits = lm_logits + self.final_logits_bias + lm_logits = self.bias_layer(lm_logits) masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) if not return_dict: diff --git a/src/transformers/models/blenderbot/modeling_tf_blenderbot.py b/src/transformers/models/blenderbot/modeling_tf_blenderbot.py index 66c06aa1b78f2e..2f4ee837678433 100644 --- a/src/transformers/models/blenderbot/modeling_tf_blenderbot.py +++ b/src/transformers/models/blenderbot/modeling_tf_blenderbot.py @@ -1239,6 +1239,24 @@ def serving_output(self, output): ) +# Copied from transformers.models.bart.modeling_tf_bart.BiasLayer +class BiasLayer(tf.keras.layers.Layer): + """ + Bias as a layer. It is used for serialization purposes: `tf.keras.Model.save_weights` stores on a per-layer basis, + so all weights have to be registered in a layer. + """ + + def __init__(self, shape, initializer, trainable, name, **kwargs): + super().__init__(name=name, **kwargs) + # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of + # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see: + # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214 + self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable) + + def call(self, x): + return x + self.bias + + @add_start_docstrings( "The BLENDERBOT Model with a language modeling head. Can be used for summarization.", BLENDERBOT_START_DOCSTRING, @@ -1254,9 +1272,10 @@ def __init__(self, config, *inputs, **kwargs): self.model = TFBlenderbotMainLayer(config, name="model") self.use_cache = config.use_cache # final_bias_logits is registered as a buffer in pytorch, so not trainable for the sake of consistency. - self.final_logits_bias = self.add_weight( + self.bias_layer = BiasLayer( name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False ) + self.final_logits_bias = self.bias_layer.bias # alias to keep the same interface with PT def get_decoder(self): return self.model.decoder @@ -1358,7 +1377,7 @@ def call( training=training, ) lm_logits = self.model.shared(outputs[0], mode="linear") - lm_logits = lm_logits + self.final_logits_bias + lm_logits = self.bias_layer(lm_logits) masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) if not return_dict: diff --git a/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py b/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py index e292784cfa8e10..4472539d282a51 100644 --- a/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py +++ b/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py @@ -1226,6 +1226,24 @@ def serving_output(self, output): ) +# Copied from transformers.models.bart.modeling_tf_bart.BiasLayer +class BiasLayer(tf.keras.layers.Layer): + """ + Bias as a layer. It is used for serialization purposes: `tf.keras.Model.save_weights` stores on a per-layer basis, + so all weights have to be registered in a layer. + """ + + def __init__(self, shape, initializer, trainable, name, **kwargs): + super().__init__(name=name, **kwargs) + # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of + # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see: + # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214 + self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable) + + def call(self, x): + return x + self.bias + + @add_start_docstrings( "The BLENDERBOT_SMALL Model with a language modeling head. Can be used for summarization.", BLENDERBOT_SMALL_START_DOCSTRING, @@ -1241,9 +1259,10 @@ def __init__(self, config, *inputs, **kwargs): self.model = TFBlenderbotSmallMainLayer(config, name="model") self.use_cache = config.use_cache # final_bias_logits is registered as a buffer in pytorch, so not trainable for the sake of consistency. - self.final_logits_bias = self.add_weight( + self.bias_layer = BiasLayer( name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False ) + self.final_logits_bias = self.bias_layer.bias # alias to keep the same interface with PT def get_decoder(self): return self.model.decoder @@ -1330,7 +1349,7 @@ def call( training=training, ) lm_logits = self.model.shared(outputs[0], mode="linear") - lm_logits = lm_logits + self.final_logits_bias + lm_logits = self.bias_layer(lm_logits) masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) if not return_dict: diff --git a/src/transformers/models/led/modeling_tf_led.py b/src/transformers/models/led/modeling_tf_led.py index 7ff69c2a634a04..2262d1ce8d6c5c 100644 --- a/src/transformers/models/led/modeling_tf_led.py +++ b/src/transformers/models/led/modeling_tf_led.py @@ -2316,6 +2316,24 @@ def serving_output(self, output): ) +# Copied from transformers.models.bart.modeling_tf_bart.BiasLayer +class BiasLayer(tf.keras.layers.Layer): + """ + Bias as a layer. It is used for serialization purposes: `tf.keras.Model.save_weights` stores on a per-layer basis, + so all weights have to be registered in a layer. + """ + + def __init__(self, shape, initializer, trainable, name, **kwargs): + super().__init__(name=name, **kwargs) + # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of + # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see: + # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214 + self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable) + + def call(self, x): + return x + self.bias + + @add_start_docstrings( "The LED Model with a language modeling head. Can be used for summarization.", LED_START_DOCSTRING, @@ -2331,9 +2349,10 @@ def __init__(self, config, *inputs, **kwargs): self.led = TFLEDMainLayer(config, name="led") self.use_cache = config.use_cache # final_bias_logits is registered as a buffer in pytorch, so not trainable for the sake of consistency. - self.final_logits_bias = self.add_weight( + self.bias_layer = BiasLayer( name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False ) + self.final_logits_bias = self.bias_layer.bias # alias to keep the same interface with PT # TODO (Joao): investigate why LED has numerical issues in XLA generate self.supports_xla_generation = False @@ -2423,7 +2442,7 @@ def call( training=training, ) lm_logits = self.led.shared(outputs[0], mode="linear") - lm_logits = lm_logits + self.final_logits_bias + lm_logits = self.bias_layer(lm_logits) masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) if not return_dict: diff --git a/src/transformers/models/marian/modeling_tf_marian.py b/src/transformers/models/marian/modeling_tf_marian.py index 50c96a82b048b2..2ceac449c1ccf3 100644 --- a/src/transformers/models/marian/modeling_tf_marian.py +++ b/src/transformers/models/marian/modeling_tf_marian.py @@ -1269,6 +1269,7 @@ def serving_output(self, output): ) +# Copied from transformers.models.bart.modeling_tf_bart.BiasLayer class BiasLayer(tf.keras.layers.Layer): """ Bias as a layer. It is used for serialization purposes: `tf.keras.Model.save_weights` stores on a per-layer basis, diff --git a/src/transformers/models/mbart/modeling_tf_mbart.py b/src/transformers/models/mbart/modeling_tf_mbart.py index 5cb39d918d5faf..47bad2e21eb272 100644 --- a/src/transformers/models/mbart/modeling_tf_mbart.py +++ b/src/transformers/models/mbart/modeling_tf_mbart.py @@ -1266,6 +1266,24 @@ def serving_output(self, output): ) +# Copied from transformers.models.bart.modeling_tf_bart.BiasLayer +class BiasLayer(tf.keras.layers.Layer): + """ + Bias as a layer. It is used for serialization purposes: `tf.keras.Model.save_weights` stores on a per-layer basis, + so all weights have to be registered in a layer. + """ + + def __init__(self, shape, initializer, trainable, name, **kwargs): + super().__init__(name=name, **kwargs) + # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of + # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see: + # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214 + self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable) + + def call(self, x): + return x + self.bias + + @add_start_docstrings( "The MBART Model with a language modeling head. Can be used for summarization.", MBART_START_DOCSTRING, @@ -1281,9 +1299,10 @@ def __init__(self, config, *inputs, **kwargs): self.model = TFMBartMainLayer(config, name="model") self.use_cache = config.use_cache # final_bias_logits is registered as a buffer in pytorch, so not trainable for the sake of consistency. - self.final_logits_bias = self.add_weight( + self.bias_layer = BiasLayer( name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False ) + self.final_logits_bias = self.bias_layer.bias # alias to keep the same interface with PT def get_decoder(self): return self.model.decoder @@ -1368,7 +1387,7 @@ def call( training=training, ) lm_logits = self.model.shared(outputs[0], mode="linear") - lm_logits = lm_logits + self.final_logits_bias + lm_logits = self.bias_layer(lm_logits) masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) if not return_dict: diff --git a/src/transformers/models/pegasus/modeling_tf_pegasus.py b/src/transformers/models/pegasus/modeling_tf_pegasus.py index 85df859c847928..dbf822060d1686 100644 --- a/src/transformers/models/pegasus/modeling_tf_pegasus.py +++ b/src/transformers/models/pegasus/modeling_tf_pegasus.py @@ -1278,6 +1278,24 @@ def serving_output(self, output): ) +# Copied from transformers.models.bart.modeling_tf_bart.BiasLayer +class BiasLayer(tf.keras.layers.Layer): + """ + Bias as a layer. It is used for serialization purposes: `tf.keras.Model.save_weights` stores on a per-layer basis, + so all weights have to be registered in a layer. + """ + + def __init__(self, shape, initializer, trainable, name, **kwargs): + super().__init__(name=name, **kwargs) + # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of + # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see: + # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214 + self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable) + + def call(self, x): + return x + self.bias + + @add_start_docstrings( "The PEGASUS Model with a language modeling head. Can be used for summarization.", PEGASUS_START_DOCSTRING, @@ -1293,9 +1311,10 @@ def __init__(self, config, *inputs, **kwargs): self.model = TFPegasusMainLayer(config, name="model") self.use_cache = config.use_cache # final_bias_logits is registered as a buffer in pytorch, so not trainable for the sake of consistency. - self.final_logits_bias = self.add_weight( + self.bias_layer = BiasLayer( name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False ) + self.final_logits_bias = self.bias_layer.bias # alias to keep the same interface with PT def get_decoder(self): return self.model.decoder @@ -1382,7 +1401,7 @@ def call( training=training, ) lm_logits = self.model.shared(outputs[0], mode="linear") - lm_logits = lm_logits + self.final_logits_bias + lm_logits = self.bias_layer(lm_logits) masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) if not return_dict: diff --git a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py index 487b7c4461b1fb..cd83f1f10b1abd 100644 --- a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py +++ b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py @@ -2806,6 +2806,24 @@ def serving_output(self, output): ) +# Copied from transformers.models.bart.modeling_tf_bart.BiasLayer +class BiasLayer(tf.keras.layers.Layer): + """ + Bias as a layer. It is used for serialization purposes: `tf.keras.Model.save_weights` stores on a per-layer basis, + so all weights have to be registered in a layer. + """ + + def __init__(self, shape, initializer, trainable, name, **kwargs): + super().__init__(name=name, **kwargs) + # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of + # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see: + # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214 + self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable) + + def call(self, x): + return x + self.bias + + @add_start_docstrings( "The {{cookiecutter.uppercase_modelname}} Model with a language modeling head. Can be used for summarization.", {{cookiecutter.uppercase_modelname}}_START_DOCSTRING, @@ -2822,9 +2840,10 @@ def __init__(self, config, *inputs, **kwargs): self.model._set_save_spec(inputs=self.serving.input_signature) self.use_cache = config.use_cache # final_bias_logits is registered as a buffer in pytorch, so not trainable for the sake of consistency. - self.final_logits_bias = self.add_weight( + self.bias_layer = BiasLayer( name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False ) + self.final_logits_bias = self.bias_layer.bias # alias to keep the same interface with PT def get_decoder(self): return self.model.decoder @@ -2911,7 +2930,7 @@ def call( training=training ) lm_logits = self.model.shared(outputs[0], mode="linear") - lm_logits = lm_logits + self.final_logits_bias + lm_logits = self.bias_layer(lm_logits) masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) if not return_dict: From 10c774cf60b87e7c53c3a11c3126f1d115d8ea23 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Wed, 7 Sep 2022 16:22:09 +0200 Subject: [PATCH 235/539] remvoe `_create_and_check_torch_fx_tracing` in specific test files (#18667) * remvoe _create_and_check_torch_fx_tracing defined in specific model test files Co-authored-by: ydshieh --- .../models/donut/test_modeling_donut_swin.py | 101 +-------------- .../test_modeling_speech_to_text.py | 105 +-------------- tests/models/swin/test_modeling_swin.py | 101 +-------------- tests/models/xglm/test_modeling_xglm.py | 121 +----------------- 4 files changed, 4 insertions(+), 424 deletions(-) diff --git a/tests/models/donut/test_modeling_donut_swin.py b/tests/models/donut/test_modeling_donut_swin.py index f909d961880a97..a35a65505981f1 100644 --- a/tests/models/donut/test_modeling_donut_swin.py +++ b/tests/models/donut/test_modeling_donut_swin.py @@ -16,14 +16,11 @@ import collections import inspect -import os -import pickle -import tempfile import unittest from transformers import DonutSwinConfig from transformers.testing_utils import require_torch, slow, torch_device -from transformers.utils import is_torch_available, is_torch_fx_available +from transformers.utils import is_torch_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor @@ -36,9 +33,6 @@ from transformers import DonutSwinModel from transformers.models.donut.modeling_donut_swin import DONUT_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST -if is_torch_fx_available(): - from transformers.utils.fx import symbolic_trace - class DonutSwinModelTester: def __init__( @@ -369,96 +363,3 @@ def test_initialization(self): [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) - - def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False): - if not is_torch_fx_available() or not self.fx_compatible: - return - - configs_no_init = _config_zero_init(config) # To be sure we have no Nan - configs_no_init.return_dict = False - - for model_class in self.all_model_classes: - model = model_class(config=configs_no_init) - model.to(torch_device) - model.eval() - inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss) - - try: - if model.config.is_encoder_decoder: - model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward - labels = inputs.get("labels", None) - input_names = ["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask"] - if labels is not None: - input_names.append("labels") - - filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} - input_names = list(filtered_inputs.keys()) - - model_output = model(**filtered_inputs) - - traced_model = symbolic_trace(model, input_names) - traced_output = traced_model(**filtered_inputs) - else: - input_names = ["input_ids", "attention_mask", "token_type_ids", "pixel_values"] - - labels = inputs.get("labels", None) - start_positions = inputs.get("start_positions", None) - end_positions = inputs.get("end_positions", None) - if labels is not None: - input_names.append("labels") - if start_positions is not None: - input_names.append("start_positions") - if end_positions is not None: - input_names.append("end_positions") - - filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} - input_names = list(filtered_inputs.keys()) - - model_output = model(**filtered_inputs) - - traced_model = symbolic_trace(model, input_names) - traced_output = traced_model(**filtered_inputs) - - except RuntimeError as e: - self.fail(f"Couldn't trace module: {e}") - - def flatten_output(output): - flatten = [] - for x in output: - if isinstance(x, (tuple, list)): - flatten += flatten_output(x) - elif not isinstance(x, torch.Tensor): - continue - else: - flatten.append(x) - return flatten - - model_output = flatten_output(model_output) - traced_output = flatten_output(traced_output) - num_outputs = len(model_output) - - for i in range(num_outputs): - self.assertTrue( - torch.allclose(model_output[i], traced_output[i]), - f"traced {i}th output doesn't match model {i}th output for {model_class}", - ) - - # Test that the model can be serialized and restored properly - with tempfile.TemporaryDirectory() as tmp_dir_name: - pkl_file_name = os.path.join(tmp_dir_name, "model.pkl") - try: - with open(pkl_file_name, "wb") as f: - pickle.dump(traced_model, f) - with open(pkl_file_name, "rb") as f: - loaded = pickle.load(f) - except Exception as e: - self.fail(f"Couldn't serialize / deserialize the traced model: {e}") - - loaded_output = loaded(**filtered_inputs) - loaded_output = flatten_output(loaded_output) - - for i in range(num_outputs): - self.assertTrue( - torch.allclose(model_output[i], loaded_output[i]), - f"serialized model {i}th output doesn't match model {i}th output for {model_class}", - ) diff --git a/tests/models/speech_to_text/test_modeling_speech_to_text.py b/tests/models/speech_to_text/test_modeling_speech_to_text.py index a1a625a9b4033f..f7645a2f0670c2 100644 --- a/tests/models/speech_to_text/test_modeling_speech_to_text.py +++ b/tests/models/speech_to_text/test_modeling_speech_to_text.py @@ -17,7 +17,6 @@ import copy import inspect import os -import pickle import tempfile import unittest @@ -31,7 +30,7 @@ slow, torch_device, ) -from transformers.utils import cached_property, is_torch_fx_available +from transformers.utils import cached_property from ...generation.test_generation_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester @@ -44,9 +43,6 @@ from transformers import Speech2TextForConditionalGeneration, Speech2TextModel, Speech2TextProcessor from transformers.models.speech_to_text.modeling_speech_to_text import Speech2TextDecoder, Speech2TextEncoder -if is_torch_fx_available(): - from transformers.utils.fx import symbolic_trace - def prepare_speech_to_text_inputs_dict( config, @@ -720,105 +716,6 @@ def _create_and_check_torchscript(self, config, inputs_dict): self.assertTrue(models_equal) - def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False): - if not is_torch_fx_available() or not self.fx_compatible: - return - - configs_no_init = _config_zero_init(config) # To be sure we have no Nan - configs_no_init.return_dict = False - - for model_class in self.all_model_classes: - model = model_class(config=configs_no_init) - model.to(torch_device) - model.eval() - inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss) - - try: - if model.config.is_encoder_decoder: - model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward - labels = inputs.get("labels", None) - input_names = [ - "input_ids", - "attention_mask", - "decoder_input_ids", - "decoder_attention_mask", - "input_features", - ] - if labels is not None: - input_names.append("labels") - - filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} - input_names = list(filtered_inputs.keys()) - - model_output = model(**filtered_inputs) - - traced_model = symbolic_trace(model, input_names) - traced_output = traced_model(**filtered_inputs) - else: - input_names = ["input_ids", "attention_mask", "token_type_ids", "pixel_values", "input_features"] - - labels = inputs.get("labels", None) - start_positions = inputs.get("start_positions", None) - end_positions = inputs.get("end_positions", None) - if labels is not None: - input_names.append("labels") - if start_positions is not None: - input_names.append("start_positions") - if end_positions is not None: - input_names.append("end_positions") - - filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} - input_names = list(filtered_inputs.keys()) - - model_output = model(**filtered_inputs) - - traced_model = symbolic_trace(model, input_names) - traced_output = traced_model(**filtered_inputs) - - except RuntimeError as e: - self.fail(f"Couldn't trace module: {e}") - - def flatten_output(output): - flatten = [] - for x in output: - if isinstance(x, (tuple, list)): - flatten += flatten_output(x) - elif not isinstance(x, torch.Tensor): - continue - else: - flatten.append(x) - return flatten - - model_output = flatten_output(model_output) - traced_output = flatten_output(traced_output) - num_outputs = len(model_output) - - for i in range(num_outputs): - self.assertTrue( - torch.allclose(model_output[i], traced_output[i]), - f"traced {i}th output doesn't match model {i}th output for {model_class}", - ) - - # Test that the model can be serialized and restored properly - with tempfile.TemporaryDirectory() as tmp_dir_name: - pkl_file_name = os.path.join(tmp_dir_name, "model.pkl") - try: - with open(pkl_file_name, "wb") as f: - pickle.dump(traced_model, f) - with open(pkl_file_name, "rb") as f: - loaded = pickle.load(f) - except Exception as e: - self.fail(f"Couldn't serialize / deserialize the traced model: {e}") - - loaded_output = loaded(**filtered_inputs) - loaded_output = flatten_output(loaded_output) - - for i in range(num_outputs): - self.assertTrue( - torch.allclose(model_output[i], loaded_output[i]), - f"serialized model {i}th output doesn't match model {i}th output for {model_class}", - ) - @require_torch @require_torchaudio diff --git a/tests/models/swin/test_modeling_swin.py b/tests/models/swin/test_modeling_swin.py index 5e07efa2a3dc00..9a5541d50911a7 100644 --- a/tests/models/swin/test_modeling_swin.py +++ b/tests/models/swin/test_modeling_swin.py @@ -16,14 +16,11 @@ import collections import inspect -import os -import pickle -import tempfile import unittest from transformers import SwinConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device -from transformers.utils import cached_property, is_torch_available, is_torch_fx_available, is_vision_available +from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor @@ -41,9 +38,6 @@ from transformers import AutoFeatureExtractor -if is_torch_fx_available(): - from transformers.utils.fx import symbolic_trace - class SwinModelTester: def __init__( @@ -428,99 +422,6 @@ def test_initialization(self): msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) - def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False): - if not is_torch_fx_available() or not self.fx_compatible: - return - - configs_no_init = _config_zero_init(config) # To be sure we have no Nan - configs_no_init.return_dict = False - - for model_class in self.all_model_classes: - model = model_class(config=configs_no_init) - model.to(torch_device) - model.eval() - inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss) - - try: - if model.config.is_encoder_decoder: - model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward - labels = inputs.get("labels", None) - input_names = ["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask"] - if labels is not None: - input_names.append("labels") - - filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} - input_names = list(filtered_inputs.keys()) - - model_output = model(**filtered_inputs) - - traced_model = symbolic_trace(model, input_names) - traced_output = traced_model(**filtered_inputs) - else: - input_names = ["input_ids", "attention_mask", "token_type_ids", "pixel_values"] - - labels = inputs.get("labels", None) - start_positions = inputs.get("start_positions", None) - end_positions = inputs.get("end_positions", None) - if labels is not None: - input_names.append("labels") - if start_positions is not None: - input_names.append("start_positions") - if end_positions is not None: - input_names.append("end_positions") - - filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} - input_names = list(filtered_inputs.keys()) - - model_output = model(**filtered_inputs) - - traced_model = symbolic_trace(model, input_names) - traced_output = traced_model(**filtered_inputs) - - except RuntimeError as e: - self.fail(f"Couldn't trace module: {e}") - - def flatten_output(output): - flatten = [] - for x in output: - if isinstance(x, (tuple, list)): - flatten += flatten_output(x) - elif not isinstance(x, torch.Tensor): - continue - else: - flatten.append(x) - return flatten - - model_output = flatten_output(model_output) - traced_output = flatten_output(traced_output) - num_outputs = len(model_output) - - for i in range(num_outputs): - self.assertTrue( - torch.allclose(model_output[i], traced_output[i]), - f"traced {i}th output doesn't match model {i}th output for {model_class}", - ) - - # Test that the model can be serialized and restored properly - with tempfile.TemporaryDirectory() as tmp_dir_name: - pkl_file_name = os.path.join(tmp_dir_name, "model.pkl") - try: - with open(pkl_file_name, "wb") as f: - pickle.dump(traced_model, f) - with open(pkl_file_name, "rb") as f: - loaded = pickle.load(f) - except Exception as e: - self.fail(f"Couldn't serialize / deserialize the traced model: {e}") - - loaded_output = loaded(**filtered_inputs) - loaded_output = flatten_output(loaded_output) - - for i in range(num_outputs): - self.assertTrue( - torch.allclose(model_output[i], loaded_output[i]), - f"serialized model {i}th output doesn't match model {i}th output for {model_class}", - ) - @require_vision @require_torch diff --git a/tests/models/xglm/test_modeling_xglm.py b/tests/models/xglm/test_modeling_xglm.py index f4da4994266d27..6d40ddab8eb2cc 100644 --- a/tests/models/xglm/test_modeling_xglm.py +++ b/tests/models/xglm/test_modeling_xglm.py @@ -15,24 +15,14 @@ import datetime import math -import os -import pickle -import tempfile import unittest from transformers import XGLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device -from transformers.utils import is_torch_fx_available from ...generation.test_generation_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester -from ...test_modeling_common import ( - ModelTesterMixin, - _config_zero_init, - floats_tensor, - ids_tensor, - random_attention_mask, -) +from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): @@ -40,9 +30,6 @@ from transformers import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMTokenizer -if is_torch_fx_available(): - from transformers.utils.fx import symbolic_trace - class XGLMModelTester: def __init__( @@ -350,112 +337,6 @@ def test_xglm_weight_initialization(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xglm_weight_initialization(*config_and_inputs) - def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False): - if not is_torch_fx_available() or not self.fx_compatible: - return - - configs_no_init = _config_zero_init(config) # To be sure we have no Nan - configs_no_init.return_dict = False - - for model_class in self.all_model_classes: - model = model_class(config=configs_no_init) - model.to(torch_device) - model.eval() - inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss) - - try: - if model.config.is_encoder_decoder: - model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward - labels = inputs.get("labels", None) - input_names = [ - "input_ids", - "attention_mask", - "decoder_input_ids", - "decoder_attention_mask", - "input_features", - ] - if labels is not None: - input_names.append("labels") - - filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} - input_names = list(filtered_inputs.keys()) - - model_output = model(**filtered_inputs) - - traced_model = symbolic_trace(model, input_names) - traced_output = traced_model(**filtered_inputs) - else: - input_names = [ - "input_ids", - "attention_mask", - "token_type_ids", - "pixel_values", - "bbox", - "input_features", - ] - - labels = inputs.get("labels", None) - start_positions = inputs.get("start_positions", None) - end_positions = inputs.get("end_positions", None) - if labels is not None: - input_names.append("labels") - if start_positions is not None: - input_names.append("start_positions") - if end_positions is not None: - input_names.append("end_positions") - - filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} - input_names = list(filtered_inputs.keys()) - - model_output = model(**filtered_inputs) - - traced_model = symbolic_trace(model, input_names) - traced_output = traced_model(**filtered_inputs) - - except RuntimeError as e: - self.fail(f"Couldn't trace module: {e}") - - def flatten_output(output): - flatten = [] - for x in output: - if isinstance(x, (tuple, list)): - flatten += flatten_output(x) - elif not isinstance(x, torch.Tensor): - continue - else: - flatten.append(x) - return flatten - - model_output = flatten_output(model_output) - traced_output = flatten_output(traced_output) - num_outputs = len(model_output) - - for i in range(num_outputs): - self.assertTrue( - torch.allclose(model_output[i], traced_output[i]), - f"traced {i}th output doesn't match model {i}th output for {model_class}", - ) - - # Test that the model can be serialized and restored properly - with tempfile.TemporaryDirectory() as tmp_dir_name: - pkl_file_name = os.path.join(tmp_dir_name, "model.pkl") - try: - with open(pkl_file_name, "wb") as f: - pickle.dump(traced_model, f) - with open(pkl_file_name, "rb") as f: - loaded = pickle.load(f) - except Exception as e: - self.fail(f"Couldn't serialize / deserialize the traced model: {e}") - - loaded_output = loaded(**filtered_inputs) - loaded_output = flatten_output(loaded_output) - - for i in range(num_outputs): - self.assertTrue( - torch.allclose(model_output[i], loaded_output[i]), - f"serialized model {i}th output doesn't match model {i}th output for {model_class}", - ) - @slow def test_batch_generation(self): model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M") From 3059d80d80dabe3119d8a009336afb326d56ecb6 Mon Sep 17 00:00:00 2001 From: Olatunji Ruwase Date: Wed, 7 Sep 2022 10:44:20 -0400 Subject: [PATCH 236/539] [DeepSpeed ZeRO3] Fix performance degradation in sharded models (#18911) * [DeepSpeed] Fix performance degradation in sharded models * style * polish Co-authored-by: Stas Bekman --- src/transformers/modeling_utils.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index 4c9354d795b299..3a3eb3edc14d32 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -421,12 +421,17 @@ def load(module: nn.Module, state_dict, prefix=""): if is_deepspeed_zero3_enabled(): import deepspeed - # because zero3 puts placeholders in model params, this context - # manager gathers (unpartitions) the params of the current layer, then loads from - # the state dict and then re-partitions them again - with deepspeed.zero.GatheredParameters(list(module.parameters(recurse=False)), modifier_rank=0): - if torch.distributed.get_rank() == 0: - module._load_from_state_dict(*args) + # In sharded models, each shard has only part of the full state_dict, so only gather + # parameters that are in the current state_dict. + named_parameters = dict(module.named_parameters(prefix=prefix[:-1], recurse=False)) + params_to_gather = [named_parameters[k] for k in state_dict.keys() if k in named_parameters] + if len(params_to_gather) > 0: + # because zero3 puts placeholders in model params, this context + # manager gathers (unpartitions) the params of the current layer, then loads from + # the state dict and then re-partitions them again + with deepspeed.zero.GatheredParameters(params_to_gather, modifier_rank=0): + if torch.distributed.get_rank() == 0: + module._load_from_state_dict(*args) else: module._load_from_state_dict(*args) From 2ef774211733f0acf8d3415f9284c49ef219e991 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Wed, 7 Sep 2022 10:38:49 -0700 Subject: [PATCH 237/539] Add DocumentQuestionAnswering pipeline (#18414) * [WIP] Skeleton of VisualQuestionAnweringPipeline extended to support LayoutLM-like models * Fixup * Use the full encoding * Basic refactoring to DocumentQuestionAnsweringPipeline * Cleanup * Improve args, docs, and implement preprocessing * Integrate OCR * Refactor question_answering pipeline * Use refactored QA code in the document qa pipeline * Fix tests * Some small cleanups * Use a string type annotation for Image.Image * Update encoding with image features * Wire through the basic docs * Handle invalid response * Handle empty word_boxes properly * Docstring fix * Integrate Donut model * Fixup * Incorporate comments * Address comments * Initial incorporation of tests * Address Comments * Change assert to ValueError * Comments * Wrap `score` in float to make it JSON serializable * Incorporate AutoModeLForDocumentQuestionAnswering changes * Fixup * Rename postprocess function * Fix auto import * Applying comments * Improve docs * Remove extra assets and add copyright * Address comments Co-authored-by: Ankur Goyal --- docs/source/en/main_classes/pipelines.mdx | 7 + docs/source/en/model_doc/auto.mdx | 8 + src/transformers/__init__.py | 10 + src/transformers/models/auto/__init__.py | 8 + src/transformers/models/auto/modeling_auto.py | 22 + .../models/auto/modeling_tf_auto.py | 21 + src/transformers/pipelines/__init__.py | 13 +- src/transformers/pipelines/base.py | 4 +- .../pipelines/document_question_answering.py | 443 ++++++++++++++++++ .../pipelines/question_answering.py | 188 +++++--- src/transformers/utils/dummy_pt_objects.py | 10 + src/transformers/utils/dummy_tf_objects.py | 10 + src/transformers/utils/fx.py | 5 +- .../models/layoutlm/test_modeling_layoutlm.py | 30 -- .../layoutlm/test_modeling_tf_layoutlm.py | 25 - ...t_pipelines_document_question_answering.py | 280 +++++++++++ tests/test_modeling_common.py | 11 +- tests/test_modeling_tf_common.py | 6 +- 18 files changed, 962 insertions(+), 139 deletions(-) create mode 100644 src/transformers/pipelines/document_question_answering.py create mode 100644 tests/pipelines/test_pipelines_document_question_answering.py diff --git a/docs/source/en/main_classes/pipelines.mdx b/docs/source/en/main_classes/pipelines.mdx index b2de7e048dd5aa..4043a00009e22d 100644 --- a/docs/source/en/main_classes/pipelines.mdx +++ b/docs/source/en/main_classes/pipelines.mdx @@ -25,6 +25,7 @@ There are two categories of pipeline abstractions to be aware about: - [`AudioClassificationPipeline`] - [`AutomaticSpeechRecognitionPipeline`] - [`ConversationalPipeline`] + - [`DocumentQuestionAnsweringPipeline`] - [`FeatureExtractionPipeline`] - [`FillMaskPipeline`] - [`ImageClassificationPipeline`] @@ -342,6 +343,12 @@ That should enable you to do all the custom code you want. - __call__ - all +### DocumentQuestionAnsweringPipeline + +[[autodoc]] DocumentQuestionAnsweringPipeline + - __call__ + - all + ### FeatureExtractionPipeline [[autodoc]] FeatureExtractionPipeline diff --git a/docs/source/en/model_doc/auto.mdx b/docs/source/en/model_doc/auto.mdx index 995296485b9402..93976424ba8edd 100644 --- a/docs/source/en/model_doc/auto.mdx +++ b/docs/source/en/model_doc/auto.mdx @@ -114,6 +114,10 @@ Likewise, if your `NewModel` is a subclass of [`PreTrainedModel`], make sure its [[autodoc]] AutoModelForTableQuestionAnswering +## AutoModelForDocumentQuestionAnswering + +[[autodoc]] AutoModelForDocumentQuestionAnswering + ## AutoModelForImageClassification [[autodoc]] AutoModelForImageClassification @@ -214,6 +218,10 @@ Likewise, if your `NewModel` is a subclass of [`PreTrainedModel`], make sure its [[autodoc]] TFAutoModelForTableQuestionAnswering +## TFAutoModelForDocumentQuestionAnswering + +[[autodoc]] TFAutoModelForDocumentQuestionAnswering + ## TFAutoModelForTokenClassification [[autodoc]] TFAutoModelForTokenClassification diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 4651c3b5b9e921..e10e2ce0ba0a41 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -383,6 +383,7 @@ "Conversation", "ConversationalPipeline", "CsvPipelineDataFormat", + "DocumentQuestionAnsweringPipeline", "FeatureExtractionPipeline", "FillMaskPipeline", "ImageClassificationPipeline", @@ -789,6 +790,7 @@ "MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING", "MODEL_FOR_CAUSAL_LM_MAPPING", "MODEL_FOR_CTC_MAPPING", + "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING", "MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING", @@ -816,6 +818,7 @@ "AutoModelForAudioXVector", "AutoModelForCausalLM", "AutoModelForCTC", + "AutoModelForDocumentQuestionAnswering", "AutoModelForImageClassification", "AutoModelForImageSegmentation", "AutoModelForInstanceSegmentation", @@ -2107,6 +2110,7 @@ "TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING", "TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING", "TF_MODEL_FOR_PRETRAINING_MAPPING", + "TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING", "TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING", "TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING", "TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING", @@ -2124,6 +2128,7 @@ "TFAutoModelForMultipleChoice", "TFAutoModelForNextSentencePrediction", "TFAutoModelForPreTraining", + "TFAutoModelForDocumentQuestionAnswering", "TFAutoModelForQuestionAnswering", "TFAutoModelForSemanticSegmentation", "TFAutoModelForSeq2SeqLM", @@ -3200,6 +3205,7 @@ Conversation, ConversationalPipeline, CsvPipelineDataFormat, + DocumentQuestionAnsweringPipeline, FeatureExtractionPipeline, FillMaskPipeline, ImageClassificationPipeline, @@ -3549,6 +3555,7 @@ MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_CTC_MAPPING, + MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_IMAGE_SEGMENTATION_MAPPING, MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING, @@ -3576,6 +3583,7 @@ AutoModelForAudioXVector, AutoModelForCausalLM, AutoModelForCTC, + AutoModelForDocumentQuestionAnswering, AutoModelForImageClassification, AutoModelForImageSegmentation, AutoModelForInstanceSegmentation, @@ -4637,6 +4645,7 @@ ) from .models.auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, + TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, @@ -4655,6 +4664,7 @@ TF_MODEL_WITH_LM_HEAD_MAPPING, TFAutoModel, TFAutoModelForCausalLM, + TFAutoModelForDocumentQuestionAnswering, TFAutoModelForImageClassification, TFAutoModelForMaskedLM, TFAutoModelForMultipleChoice, diff --git a/src/transformers/models/auto/__init__.py b/src/transformers/models/auto/__init__.py index ec253f6037a3d3..6129253f14711b 100644 --- a/src/transformers/models/auto/__init__.py +++ b/src/transformers/models/auto/__init__.py @@ -47,6 +47,7 @@ "MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING", "MODEL_FOR_CAUSAL_LM_MAPPING", "MODEL_FOR_CTC_MAPPING", + "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING", "MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING", @@ -93,6 +94,7 @@ "AutoModelForVideoClassification", "AutoModelForVision2Seq", "AutoModelForVisualQuestionAnswering", + "AutoModelForDocumentQuestionAnswering", "AutoModelWithLMHead", ] @@ -111,6 +113,7 @@ "TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING", "TF_MODEL_FOR_PRETRAINING_MAPPING", "TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING", + "TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING", "TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING", "TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING", "TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING", @@ -127,6 +130,7 @@ "TFAutoModelForMultipleChoice", "TFAutoModelForNextSentencePrediction", "TFAutoModelForPreTraining", + "TFAutoModelForDocumentQuestionAnswering", "TFAutoModelForQuestionAnswering", "TFAutoModelForSemanticSegmentation", "TFAutoModelForSeq2SeqLM", @@ -191,6 +195,7 @@ MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_CTC_MAPPING, + MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_IMAGE_SEGMENTATION_MAPPING, MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING, @@ -218,6 +223,7 @@ AutoModelForAudioXVector, AutoModelForCausalLM, AutoModelForCTC, + AutoModelForDocumentQuestionAnswering, AutoModelForImageClassification, AutoModelForImageSegmentation, AutoModelForInstanceSegmentation, @@ -248,6 +254,7 @@ else: from .modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, + TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, @@ -266,6 +273,7 @@ TF_MODEL_WITH_LM_HEAD_MAPPING, TFAutoModel, TFAutoModelForCausalLM, + TFAutoModelForDocumentQuestionAnswering, TFAutoModelForImageClassification, TFAutoModelForMaskedLM, TFAutoModelForMultipleChoice, diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index 5060b535b05da4..1cb0ae44db0105 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -603,6 +603,14 @@ ] ) +MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( + [ + ("layoutlm", "LayoutLMForQuestionAnswering"), + ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"), + ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"), + ] +) + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict( [ # Model for Token Classification mapping @@ -773,6 +781,9 @@ MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( CONFIG_MAPPING_NAMES, MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES ) +MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES +) MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES) MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = _LazyAutoMapping( CONFIG_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES @@ -891,6 +902,17 @@ class AutoModelForVisualQuestionAnswering(_BaseAutoModelClass): ) +class AutoModelForDocumentQuestionAnswering(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING + + +AutoModelForDocumentQuestionAnswering = auto_class_update( + AutoModelForDocumentQuestionAnswering, + head_doc="document question answering", + checkpoint_for_example='impira/layoutlm-document-qa", revision="3dc6de3', +) + + class AutoModelForTokenClassification(_BaseAutoModelClass): _model_mapping = MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING diff --git a/src/transformers/models/auto/modeling_tf_auto.py b/src/transformers/models/auto/modeling_tf_auto.py index a12f6accdcaeee..ba1e74e14caf63 100644 --- a/src/transformers/models/auto/modeling_tf_auto.py +++ b/src/transformers/models/auto/modeling_tf_auto.py @@ -315,6 +315,13 @@ ] ) +TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( + [ + ("layoutlm", "TFLayoutLMForQuestionAnswering"), + ] +) + + TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict( [ # Model for Table Question Answering mapping @@ -406,6 +413,9 @@ TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( CONFIG_MAPPING_NAMES, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) +TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES +) TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping( CONFIG_MAPPING_NAMES, TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES ) @@ -515,6 +525,17 @@ class TFAutoModelForQuestionAnswering(_BaseAutoModelClass): TFAutoModelForQuestionAnswering = auto_class_update(TFAutoModelForQuestionAnswering, head_doc="question answering") +class TFAutoModelForDocumentQuestionAnswering(_BaseAutoModelClass): + _model_mapping = TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING + + +TFAutoModelForDocumentQuestionAnswering = auto_class_update( + TFAutoModelForDocumentQuestionAnswering, + head_doc="document question answering", + checkpoint_for_example='impira/layoutlm-document-qa", revision="3dc6de3', +) + + class TFAutoModelForTableQuestionAnswering(_BaseAutoModelClass): _model_mapping = TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING diff --git a/src/transformers/pipelines/__init__.py b/src/transformers/pipelines/__init__.py index ee7dee57c0e9d2..e3f9e603b5111d 100755 --- a/src/transformers/pipelines/__init__.py +++ b/src/transformers/pipelines/__init__.py @@ -51,6 +51,7 @@ infer_framework_load_model, ) from .conversational import Conversation, ConversationalPipeline +from .document_question_answering import DocumentQuestionAnsweringPipeline from .feature_extraction import FeatureExtractionPipeline from .fill_mask import FillMaskPipeline from .image_classification import ImageClassificationPipeline @@ -109,6 +110,7 @@ AutoModelForAudioClassification, AutoModelForCausalLM, AutoModelForCTC, + AutoModelForDocumentQuestionAnswering, AutoModelForImageClassification, AutoModelForImageSegmentation, AutoModelForMaskedLM, @@ -215,6 +217,15 @@ }, "type": "multimodal", }, + "document-question-answering": { + "impl": DocumentQuestionAnsweringPipeline, + "pt": (AutoModelForDocumentQuestionAnswering,) if is_torch_available() else (), + "tf": (), + "default": { + "model": {"pt": ("impira/layoutlm-document-qa", "3a93017")}, + }, + "type": "multimodal", + }, "fill-mask": { "impl": FillMaskPipeline, "tf": (TFAutoModelForMaskedLM,) if is_tf_available() else (), @@ -443,7 +454,7 @@ def pipeline( trust_remote_code: Optional[bool] = None, model_kwargs: Dict[str, Any] = None, pipeline_class: Optional[Any] = None, - **kwargs + **kwargs, ) -> Pipeline: """ Utility factory method to build a [`Pipeline`]. diff --git a/src/transformers/pipelines/base.py b/src/transformers/pipelines/base.py index 7842b95b32859c..b5e7c9cb58ce08 100644 --- a/src/transformers/pipelines/base.py +++ b/src/transformers/pipelines/base.py @@ -178,7 +178,7 @@ def infer_framework_load_model( model_classes: Optional[Dict[str, Tuple[type]]] = None, task: Optional[str] = None, framework: Optional[str] = None, - **model_kwargs + **model_kwargs, ): """ Select framework (TensorFlow or PyTorch) to use from the `model` passed. Returns a tuple (framework, model). @@ -274,7 +274,7 @@ def infer_framework_from_model( model_classes: Optional[Dict[str, Tuple[type]]] = None, task: Optional[str] = None, framework: Optional[str] = None, - **model_kwargs + **model_kwargs, ): """ Select framework (TensorFlow or PyTorch) to use from the `model` passed. Returns a tuple (framework, model). diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py new file mode 100644 index 00000000000000..b0fe18cb9dd6c2 --- /dev/null +++ b/src/transformers/pipelines/document_question_answering.py @@ -0,0 +1,443 @@ +# Copyright 2022 The Impira Team and the HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re +from typing import List, Optional, Tuple, Union + +import numpy as np + +from ..utils import ( + ExplicitEnum, + add_end_docstrings, + is_pytesseract_available, + is_torch_available, + is_vision_available, + logging, +) +from .base import PIPELINE_INIT_ARGS, Pipeline +from .question_answering import select_starts_ends + + +if is_vision_available(): + from PIL import Image + + from ..image_utils import load_image + +if is_torch_available(): + import torch + + from ..models.auto.modeling_auto import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING + +TESSERACT_LOADED = False +if is_pytesseract_available(): + TESSERACT_LOADED = True + import pytesseract + +logger = logging.get_logger(__name__) + + +# normalize_bbox() and apply_tesseract() are derived from apply_tesseract in models/layoutlmv3/feature_extraction_layoutlmv3.py. +# However, because the pipeline may evolve from what layoutlmv3 currently does, it's copied (vs. imported) to avoid creating an +# unecessary dependency. +def normalize_box(box, width, height): + return [ + int(1000 * (box[0] / width)), + int(1000 * (box[1] / height)), + int(1000 * (box[2] / width)), + int(1000 * (box[3] / height)), + ] + + +def apply_tesseract(image: "Image.Image", lang: Optional[str], tesseract_config: Optional[str]): + """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes.""" + # apply OCR + data = pytesseract.image_to_data(image, lang=lang, output_type="dict", config=tesseract_config) + words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"] + + # filter empty words and corresponding coordinates + irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()] + words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices] + left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices] + top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices] + width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices] + height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices] + + # turn coordinates into (left, top, left+width, top+height) format + actual_boxes = [] + for x, y, w, h in zip(left, top, width, height): + actual_box = [x, y, x + w, y + h] + actual_boxes.append(actual_box) + + image_width, image_height = image.size + + # finally, normalize the bounding boxes + normalized_boxes = [] + for box in actual_boxes: + normalized_boxes.append(normalize_box(box, image_width, image_height)) + + if len(words) != len(normalized_boxes): + raise ValueError("Not as many words as there are bounding boxes") + + return words, normalized_boxes + + +class ModelType(ExplicitEnum): + LayoutLM = "layoutlm" + LayoutLMv2andv3 = "layoutlmv2andv3" + VisionEncoderDecoder = "vision_encoder_decoder" + + +@add_end_docstrings(PIPELINE_INIT_ARGS) +class DocumentQuestionAnsweringPipeline(Pipeline): + # TODO: Update task_summary docs to include an example with document QA and then update the first sentence + """ + Document Question Answering pipeline using any `AutoModelForDocumentQuestionAnswering`. The inputs/outputs are + similar to the (extractive) question answering pipeline; however, the pipeline takes an image (and optional OCR'd + words/boxes) as input instead of text context. + + This document question answering pipeline can currently be loaded from [`pipeline`] using the following task + identifier: `"document-question-answering"`. + + The models that this pipeline can use are models that have been fine-tuned on a document question answering task. + See the up-to-date list of available models on + [huggingface.co/models](https://huggingface.co/models?filter=document-question-answering). + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.check_model_type(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING) + + if self.model.config.__class__.__name__ == "VisionEncoderDecoderConfig": + self.model_type = ModelType.VisionEncoderDecoder + if self.model.config.encoder.model_type != "donut-swin": + raise ValueError("Currently, the only supported VisionEncoderDecoder model is Donut") + elif self.model.config.__class__.__name__ == "LayoutLMConfig": + self.model_type = ModelType.LayoutLM + else: + self.model_type = ModelType.LayoutLMv2andv3 + + def _sanitize_parameters( + self, + padding=None, + doc_stride=None, + max_question_len=None, + lang: Optional[str] = None, + tesseract_config: Optional[str] = None, + max_answer_len=None, + max_seq_len=None, + top_k=None, + handle_impossible_answer=None, + **kwargs, + ): + preprocess_params, postprocess_params = {}, {} + if padding is not None: + preprocess_params["padding"] = padding + if doc_stride is not None: + preprocess_params["doc_stride"] = doc_stride + if max_question_len is not None: + preprocess_params["max_question_len"] = max_question_len + if max_seq_len is not None: + preprocess_params["max_seq_len"] = max_seq_len + if lang is not None: + preprocess_params["lang"] = lang + if tesseract_config is not None: + preprocess_params["tesseract_config"] = tesseract_config + + if top_k is not None: + if top_k < 1: + raise ValueError(f"top_k parameter should be >= 1 (got {top_k})") + postprocess_params["top_k"] = top_k + if max_answer_len is not None: + if max_answer_len < 1: + raise ValueError(f"max_answer_len parameter should be >= 1 (got {max_answer_len}") + postprocess_params["max_answer_len"] = max_answer_len + if handle_impossible_answer is not None: + postprocess_params["handle_impossible_answer"] = handle_impossible_answer + + return preprocess_params, {}, postprocess_params + + def __call__( + self, + image: Union["Image.Image", str], + question: Optional[str] = None, + word_boxes: Tuple[str, List[float]] = None, + **kwargs, + ): + """ + Answer the question(s) given as inputs by using the document(s). A document is defined as an image and an + optional list of (word, box) tuples which represent the text in the document. If the `word_boxes` are not + provided, it will use the Tesseract OCR engine (if available) to extract the words and boxes automatically for + LayoutLM-like models which require them as input. For Donut, no OCR is run. + + You can invoke the pipeline several ways: + + - `pipeline(image=image, question=question)` + - `pipeline(image=image, question=question, word_boxes=word_boxes)` + - `pipeline([{"image": image, "question": question}])` + - `pipeline([{"image": image, "question": question, "word_boxes": word_boxes}])` + + Args: + image (`str` or `PIL.Image`): + The pipeline handles three types of images: + + - A string containing a http link pointing to an image + - A string containing a local path to an image + - An image loaded in PIL directly + + The pipeline accepts either a single image or a batch of images. If given a single image, it can be + broadcasted to multiple questions. + question (`str`): + A question to ask of the document. + word_boxes (`List[str, Tuple[float, float, float, float]]`, *optional*): + A list of words and bounding boxes (normalized 0->1000). If you provide this optional input, then the + pipeline will use these words and boxes instead of running OCR on the image to derive them for models + that need them (e.g. LayoutLM). This allows you to reuse OCR'd results across many invocations of the + pipeline without having to re-run it each time. + top_k (`int`, *optional*, defaults to 1): + The number of answers to return (will be chosen by order of likelihood). Note that we return less than + top_k answers if there are not enough options available within the context. + doc_stride (`int`, *optional*, defaults to 128): + If the words in the document are too long to fit with the question for the model, it will be split in + several chunks with some overlap. This argument controls the size of that overlap. + max_answer_len (`int`, *optional*, defaults to 15): + The maximum length of predicted answers (e.g., only answers with a shorter length are considered). + max_seq_len (`int`, *optional*, defaults to 384): + The maximum length of the total sentence (context + question) in tokens of each chunk passed to the + model. The context will be split in several chunks (using `doc_stride` as overlap) if needed. + max_question_len (`int`, *optional*, defaults to 64): + The maximum length of the question after tokenization. It will be truncated if needed. + handle_impossible_answer (`bool`, *optional*, defaults to `False`): + Whether or not we accept impossible as an answer. + lang (`str`, *optional*): + Language to use while running OCR. Defaults to english. + tesseract_config (`str`, *optional*): + Additional flags to pass to tesseract while running OCR. + + Return: + A `dict` or a list of `dict`: Each result comes as a dictionary with the following keys: + + - **score** (`float`) -- The probability associated to the answer. + - **start** (`int`) -- The start word index of the answer (in the OCR'd version of the input or provided + `word_boxes`). + - **end** (`int`) -- The end word index of the answer (in the OCR'd version of the input or provided + `word_boxes`). + - **answer** (`str`) -- The answer to the question. + """ + if isinstance(question, str): + inputs = {"question": question, "image": image} + if word_boxes is not None: + inputs["word_boxes"] = word_boxes + else: + inputs = image + return super().__call__(inputs, **kwargs) + + def preprocess(self, input, lang=None, tesseract_config=""): + image = None + image_features = {} + if input.get("image", None) is not None: + image = load_image(input["image"]) + if self.feature_extractor is not None: + image_features.update(self.feature_extractor(images=image, return_tensors=self.framework)) + elif self.model_type == ModelType.VisionEncoderDecoder: + raise ValueError("If you are using a VisionEncoderDecoderModel, you must provide a feature extractor") + + words, boxes = None, None + if not self.model_type == ModelType.VisionEncoderDecoder: + if "word_boxes" in input: + words = [x[0] for x in input["word_boxes"]] + boxes = [x[1] for x in input["word_boxes"]] + elif "words" in image_features and "boxes" in image_features: + words = image_features.pop("words")[0] + boxes = image_features.pop("boxes")[0] + elif image is not None: + if not TESSERACT_LOADED: + raise ValueError( + "If you provide an image without word_boxes, then the pipeline will run OCR using Tesseract," + " but pytesseract is not available" + ) + if TESSERACT_LOADED: + words, boxes = apply_tesseract(image, lang=lang, tesseract_config=tesseract_config) + else: + raise ValueError( + "You must provide an image or word_boxes. If you provide an image, the pipeline will automatically" + " run OCR to derive words and boxes" + ) + + if self.tokenizer.padding_side != "right": + raise ValueError( + "Document question answering only supports tokenizers whose padding side is 'right', not" + f" {self.tokenizer.padding_side}" + ) + + if self.model_type == ModelType.VisionEncoderDecoder: + task_prompt = f'{input["question"]}' + # Adapted from https://huggingface.co/spaces/nielsr/donut-docvqa/blob/main/app.py + encoding = { + "inputs": image_features["pixel_values"], + "decoder_input_ids": self.tokenizer( + task_prompt, add_special_tokens=False, return_tensors=self.framework + ).input_ids, + "return_dict_in_generate": True, + } + p_mask = None + word_ids = None + words = None + else: + tokenizer_kwargs = {} + if self.model_type == ModelType.LayoutLM: + tokenizer_kwargs["text"] = input["question"].split() + tokenizer_kwargs["text_pair"] = words + tokenizer_kwargs["is_split_into_words"] = True + else: + tokenizer_kwargs["text"] = [input["question"]] + tokenizer_kwargs["text_pair"] = [words] + tokenizer_kwargs["boxes"] = [boxes] + + encoding = self.tokenizer( + return_token_type_ids=True, + return_tensors=self.framework, + # TODO: In a future PR, use these feature to handle sequences whose length is longer than + # the maximum allowed by the model. Currently, the tokenizer will produce a sequence that + # may be too long for the model to handle. + # truncation="only_second", + # return_overflowing_tokens=True, + **tokenizer_kwargs, + ) + + if "pixel_values" in image_features: + encoding["image"] = image_features.pop("pixel_values") + + # TODO: For now, this should always be num_spans == 1 given the flags we've passed in above, but the + # code is written to naturally handle multiple spans at the right time. + num_spans = len(encoding["input_ids"]) + + # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer) + # We put 0 on the tokens from the context and 1 everywhere else (question and special tokens) + # This logic mirrors the logic in the question_answering pipeline + p_mask = [[tok != 1 for tok in encoding.sequence_ids(span_id)] for span_id in range(num_spans)] + for span_idx in range(num_spans): + input_ids_span_idx = encoding["input_ids"][span_idx] + # keep the cls_token unmasked (some models use it to indicate unanswerable questions) + if self.tokenizer.cls_token_id is not None: + cls_indices = np.nonzero(np.array(input_ids_span_idx) == self.tokenizer.cls_token_id)[0] + for cls_index in cls_indices: + p_mask[span_idx][cls_index] = 0 + + # For each span, place a bounding box [0,0,0,0] for question and CLS tokens, [1000,1000,1000,1000] + # for SEP tokens, and the word's bounding box for words in the original document. + if "boxes" not in tokenizer_kwargs: + bbox = [] + for batch_index in range(num_spans): + for input_id, sequence_id, word_id in zip( + encoding.input_ids[batch_index], + encoding.sequence_ids(batch_index), + encoding.word_ids(batch_index), + ): + if sequence_id == 1: + bbox.append(boxes[word_id]) + elif input_id == self.tokenizer.sep_token_id: + bbox.append([1000] * 4) + else: + bbox.append([0] * 4) + + if self.framework == "tf": + raise ValueError("Unsupported: Tensorflow preprocessing for DocumentQuestionAnsweringPipeline") + elif self.framework == "pt": + encoding["bbox"] = torch.tensor([bbox]) + + word_ids = [encoding.word_ids(i) for i in range(num_spans)] + + return {**encoding, "p_mask": p_mask, "word_ids": word_ids, "words": words} + + def _forward(self, model_inputs): + p_mask = model_inputs.pop("p_mask", None) + word_ids = model_inputs.pop("word_ids", None) + words = model_inputs.pop("words", None) + + if self.model_type == ModelType.VisionEncoderDecoder: + model_outputs = self.model.generate(**model_inputs) + else: + model_outputs = self.model(**model_inputs) + + model_outputs["p_mask"] = p_mask + model_outputs["word_ids"] = word_ids + model_outputs["words"] = words + model_outputs["attention_mask"] = model_inputs.get("attention_mask", None) + return model_outputs + + def postprocess(self, model_outputs, top_k=1, **kwargs): + if self.model_type == ModelType.VisionEncoderDecoder: + answers = self.postprocess_donut(model_outputs) + else: + answers = self.postprocess_extractive_qa(model_outputs, top_k=top_k, **kwargs) + + answers = sorted(answers, key=lambda x: x.get("score", 0), reverse=True)[:top_k] + if len(answers) == 1: + return answers[0] + return answers + + def postprocess_donut(self, model_outputs, **kwargs): + sequence = self.tokenizer.batch_decode(model_outputs.sequences)[0] + + # TODO: A lot of this logic is specific to Donut and should probably be handled in the tokenizer + # (see https://github.com/huggingface/transformers/pull/18414/files#r961747408 for more context). + sequence = sequence.replace(self.tokenizer.eos_token, "").replace(self.tokenizer.pad_token, "") + sequence = re.sub(r"<.*?>", "", sequence, count=1).strip() # remove first task start token + ret = { + "answer": None, + } + + answer = re.search(r"(.*)", sequence) + if answer is not None: + ret["answer"] = answer.group(1).strip() + return [ret] + + def postprocess_extractive_qa( + self, model_outputs, top_k=1, handle_impossible_answer=False, max_answer_len=15, **kwargs + ): + min_null_score = 1000000 # large and positive + answers = [] + words = model_outputs["words"] + + # TODO: Currently, we expect the length of model_outputs to be 1, because we do not stride + # in the preprocessor code. When we implement that, we'll either need to handle tensors of size + # > 1 or use the ChunkPipeline and handle multiple outputs (each of size = 1). + starts, ends, scores, min_null_score = select_starts_ends( + model_outputs["start_logits"], + model_outputs["end_logits"], + model_outputs["p_mask"], + model_outputs["attention_mask"].numpy() if model_outputs.get("attention_mask", None) is not None else None, + min_null_score, + top_k, + handle_impossible_answer, + max_answer_len, + ) + + word_ids = model_outputs["word_ids"][0] + for start, eend, score in zip(starts, ends, scores): + word_start, word_end = word_ids[start], word_ids[eend] + if word_start is not None and word_end is not None: + answers.append( + { + "score": float(score), # XXX Write a test that verifies the result is JSON-serializable + "answer": " ".join(words[word_start : word_end + 1]), + "start": word_start, + "end": word_end, + } + ) + + if handle_impossible_answer: + answers.append({"score": min_null_score, "answer": "", "start": 0, "end": 0}) + + return answers diff --git a/src/transformers/pipelines/question_answering.py b/src/transformers/pipelines/question_answering.py index 6f07382dc57c6b..6a1a0011c5efc1 100644 --- a/src/transformers/pipelines/question_answering.py +++ b/src/transformers/pipelines/question_answering.py @@ -42,6 +42,110 @@ from ..models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING +def decode_spans( + start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int, undesired_tokens: np.ndarray +) -> Tuple: + """ + Take the output of any `ModelForQuestionAnswering` and will generate probabilities for each span to be the actual + answer. + + In addition, it filters out some unwanted/impossible cases like answer len being greater than max_answer_len or + answer end position being before the starting position. The method supports output the k-best answer through the + topk argument. + + Args: + start (`np.ndarray`): Individual start probabilities for each token. + end (`np.ndarray`): Individual end probabilities for each token. + topk (`int`): Indicates how many possible answer span(s) to extract from the model output. + max_answer_len (`int`): Maximum size of the answer to extract from the model's output. + undesired_tokens (`np.ndarray`): Mask determining tokens that can be part of the answer + """ + # Ensure we have batch axis + if start.ndim == 1: + start = start[None] + + if end.ndim == 1: + end = end[None] + + # Compute the score of each tuple(start, end) to be the real answer + outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1)) + + # Remove candidate with end < start and end - start > max_answer_len + candidates = np.tril(np.triu(outer), max_answer_len - 1) + + # Inspired by Chen & al. (https://github.com/facebookresearch/DrQA) + scores_flat = candidates.flatten() + if topk == 1: + idx_sort = [np.argmax(scores_flat)] + elif len(scores_flat) < topk: + idx_sort = np.argsort(-scores_flat) + else: + idx = np.argpartition(-scores_flat, topk)[0:topk] + idx_sort = idx[np.argsort(-scores_flat[idx])] + + starts, ends = np.unravel_index(idx_sort, candidates.shape)[1:] + desired_spans = np.isin(starts, undesired_tokens.nonzero()) & np.isin(ends, undesired_tokens.nonzero()) + starts = starts[desired_spans] + ends = ends[desired_spans] + scores = candidates[0, starts, ends] + + return starts, ends, scores + + +def select_starts_ends( + start, + end, + p_mask, + attention_mask, + min_null_score=1000000, + top_k=1, + handle_impossible_answer=False, + max_answer_len=15, +): + """ + Takes the raw output of any `ModelForQuestionAnswering` and first normalizes its outputs and then uses + `decode_spans()` to generate probabilities for each span to be the actual answer. + + Args: + start (`np.ndarray`): Individual start logits for each token. + end (`np.ndarray`): Individual end logits for each token. + p_mask (`np.ndarray`): A mask with 1 for values that cannot be in the answer + attention_mask (`np.ndarray`): The attention mask generated by the tokenizer + min_null_score(`float`): The minimum null (empty) answer score seen so far. + topk (`int`): Indicates how many possible answer span(s) to extract from the model output. + handle_impossible_answer(`bool`): Whether to allow null (empty) answers + max_answer_len (`int`): Maximum size of the answer to extract from the model's output. + """ + # Ensure padded tokens & question tokens cannot belong to the set of candidate answers. + undesired_tokens = np.abs(np.array(p_mask) - 1) + + if attention_mask is not None: + undesired_tokens = undesired_tokens & attention_mask + + # Generate mask + undesired_tokens_mask = undesired_tokens == 0.0 + + # Make sure non-context indexes in the tensor cannot contribute to the softmax + start = np.where(undesired_tokens_mask, -10000.0, start) + end = np.where(undesired_tokens_mask, -10000.0, end) + + # Normalize logits and spans to retrieve the answer + start = np.exp(start - start.max(axis=-1, keepdims=True)) + start = start / start.sum() + + end = np.exp(end - end.max(axis=-1, keepdims=True)) + end = end / end.sum() + + if handle_impossible_answer: + min_null_score = min(min_null_score, (start[0, 0] * end[0, 0]).item()) + + # Mask CLS + start[0, 0] = end[0, 0] = 0.0 + + starts, ends, scores = decode_spans(start, end, top_k, max_answer_len, undesired_tokens) + return starts, ends, scores, min_null_score + + class QuestionAnsweringArgumentHandler(ArgumentHandler): """ QuestionAnsweringPipeline requires the user to provide multiple arguments (i.e. question & context) to be mapped to @@ -141,7 +245,7 @@ def __init__( framework: Optional[str] = None, device: int = -1, task: str = "", - **kwargs + **kwargs, ): super().__init__( model=model, @@ -410,34 +514,15 @@ def postprocess( start_ = output["start"] end_ = output["end"] example = output["example"] + p_mask = output["p_mask"] + attention_mask = ( + output["attention_mask"].numpy() if output.get("attention_mask", None) is not None else None + ) - # Ensure padded tokens & question tokens cannot belong to the set of candidate answers. - undesired_tokens = np.abs(np.array(output["p_mask"]) - 1) - - if output.get("attention_mask", None) is not None: - undesired_tokens = undesired_tokens & output["attention_mask"].numpy() - - # Generate mask - undesired_tokens_mask = undesired_tokens == 0.0 - - # Make sure non-context indexes in the tensor cannot contribute to the softmax - start_ = np.where(undesired_tokens_mask, -10000.0, start_) - end_ = np.where(undesired_tokens_mask, -10000.0, end_) - - # Normalize logits and spans to retrieve the answer - start_ = np.exp(start_ - start_.max(axis=-1, keepdims=True)) - start_ = start_ / start_.sum() - - end_ = np.exp(end_ - end_.max(axis=-1, keepdims=True)) - end_ = end_ / end_.sum() - - if handle_impossible_answer: - min_null_score = min(min_null_score, (start_[0, 0] * end_[0, 0]).item()) - - # Mask CLS - start_[0, 0] = end_[0, 0] = 0.0 + starts, ends, scores, min_null_score = select_starts_ends( + start_, end_, p_mask, attention_mask, min_null_score, top_k, handle_impossible_answer, max_answer_len + ) - starts, ends, scores = self.decode(start_, end_, top_k, max_answer_len, undesired_tokens) if not self.tokenizer.is_fast: char_to_word = np.array(example.char_to_word_offset) @@ -518,55 +603,6 @@ def get_indices( end_index = enc.offsets[e][1] return start_index, end_index - def decode( - self, start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int, undesired_tokens: np.ndarray - ) -> Tuple: - """ - Take the output of any `ModelForQuestionAnswering` and will generate probabilities for each span to be the - actual answer. - - In addition, it filters out some unwanted/impossible cases like answer len being greater than max_answer_len or - answer end position being before the starting position. The method supports output the k-best answer through - the topk argument. - - Args: - start (`np.ndarray`): Individual start probabilities for each token. - end (`np.ndarray`): Individual end probabilities for each token. - topk (`int`): Indicates how many possible answer span(s) to extract from the model output. - max_answer_len (`int`): Maximum size of the answer to extract from the model's output. - undesired_tokens (`np.ndarray`): Mask determining tokens that can be part of the answer - """ - # Ensure we have batch axis - if start.ndim == 1: - start = start[None] - - if end.ndim == 1: - end = end[None] - - # Compute the score of each tuple(start, end) to be the real answer - outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1)) - - # Remove candidate with end < start and end - start > max_answer_len - candidates = np.tril(np.triu(outer), max_answer_len - 1) - - # Inspired by Chen & al. (https://github.com/facebookresearch/DrQA) - scores_flat = candidates.flatten() - if topk == 1: - idx_sort = [np.argmax(scores_flat)] - elif len(scores_flat) < topk: - idx_sort = np.argsort(-scores_flat) - else: - idx = np.argpartition(-scores_flat, topk)[0:topk] - idx_sort = idx[np.argsort(-scores_flat[idx])] - - starts, ends = np.unravel_index(idx_sort, candidates.shape)[1:] - desired_spans = np.isin(starts, undesired_tokens.nonzero()) & np.isin(ends, undesired_tokens.nonzero()) - starts = starts[desired_spans] - ends = ends[desired_spans] - scores = candidates[0, starts, ends] - - return starts, ends, scores - def span_to_answer(self, text: str, start: int, end: int) -> Dict[str, Union[str, int]]: """ When decoding from token probabilities, this method maps token indexes to actual word in the initial context. diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index 32ba979f78b62b..dbdf37da4c7161 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -358,6 +358,9 @@ def load_tf_weights_in_albert(*args, **kwargs): MODEL_FOR_CTC_MAPPING = None +MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = None + + MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = None @@ -463,6 +466,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class AutoModelForDocumentQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class AutoModelForImageClassification(metaclass=DummyObject): _backends = ["torch"] diff --git a/src/transformers/utils/dummy_tf_objects.py b/src/transformers/utils/dummy_tf_objects.py index bc3eb64ca46dab..69e11eeb31d605 100644 --- a/src/transformers/utils/dummy_tf_objects.py +++ b/src/transformers/utils/dummy_tf_objects.py @@ -265,6 +265,9 @@ def __init__(self, *args, **kwargs): TF_MODEL_FOR_CAUSAL_LM_MAPPING = None +TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = None + + TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = None @@ -327,6 +330,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) +class TFAutoModelForDocumentQuestionAnswering(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + class TFAutoModelForImageClassification(metaclass=DummyObject): _backends = ["tf"] diff --git a/src/transformers/utils/fx.py b/src/transformers/utils/fx.py index aec3c950ae435a..c08f6766c9dfc4 100644 --- a/src/transformers/utils/fx.py +++ b/src/transformers/utils/fx.py @@ -36,6 +36,7 @@ MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, + MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, @@ -71,6 +72,7 @@ def _generate_supported_model_class_names( "seq2seq-lm": MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, "speech-seq2seq": MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, "multiple-choice": MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, + "document-question-answering": MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, "question-answering": MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, "sequence-classification": MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, "token-classification": MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, @@ -147,7 +149,6 @@ def _generate_supported_model_class_names( "GPT2DoubleHeadsModel", "Speech2Text2Decoder", "TrOCRDecoder", - "LayoutLMForQuestionAnswering", # TODO: add support for them as it should be quite easy to do so (small blocking issues). # XLNetForQuestionAnswering, ] @@ -691,7 +692,7 @@ def _generate_dummy_input( inputs_dict["labels"] = torch.zeros(batch_size, dtype=torch.long, device=device) elif model_class_name in [ *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES), - "LayoutLMForQuestionAnswering", + *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES), "XLNetForQuestionAnswering", ]: inputs_dict["start_positions"] = torch.zeros(batch_size, dtype=torch.long, device=device) diff --git a/tests/models/layoutlm/test_modeling_layoutlm.py b/tests/models/layoutlm/test_modeling_layoutlm.py index cce3c9b3f48615..16cacab88c8614 100644 --- a/tests/models/layoutlm/test_modeling_layoutlm.py +++ b/tests/models/layoutlm/test_modeling_layoutlm.py @@ -12,12 +12,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -import copy import unittest from transformers import LayoutLMConfig, is_torch_available -from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester @@ -28,9 +25,6 @@ import torch from transformers import ( - MODEL_FOR_MASKED_LM_MAPPING, - MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, - MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMForMaskedLM, LayoutLMForQuestionAnswering, LayoutLMForSequenceClassification, @@ -273,30 +267,6 @@ def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) - def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): - inputs_dict = copy.deepcopy(inputs_dict) - if return_labels: - if model_class in get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING): - inputs_dict["labels"] = torch.zeros( - self.model_tester.batch_size, dtype=torch.long, device=torch_device - ) - elif model_class in [ - *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING), - *get_values(MODEL_FOR_MASKED_LM_MAPPING), - ]: - inputs_dict["labels"] = torch.zeros( - (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device - ) - elif model_class.__name__ == "LayoutLMForQuestionAnswering": - inputs_dict["start_positions"] = torch.zeros( - self.model_tester.batch_size, dtype=torch.long, device=torch_device - ) - inputs_dict["end_positions"] = torch.zeros( - self.model_tester.batch_size, dtype=torch.long, device=torch_device - ) - - return inputs_dict - def prepare_layoutlm_batch_inputs(): # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: diff --git a/tests/models/layoutlm/test_modeling_tf_layoutlm.py b/tests/models/layoutlm/test_modeling_tf_layoutlm.py index 9323b0bb9b97d2..4224f20a1da76d 100644 --- a/tests/models/layoutlm/test_modeling_tf_layoutlm.py +++ b/tests/models/layoutlm/test_modeling_tf_layoutlm.py @@ -13,13 +13,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -import copy import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available -from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester @@ -29,11 +27,6 @@ if is_tf_available(): import tensorflow as tf - from transformers import ( - TF_MODEL_FOR_MASKED_LM_MAPPING, - TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, - TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, - ) from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, @@ -263,24 +256,6 @@ def test_model_from_pretrained(self): model = TFLayoutLMModel.from_pretrained(model_name) self.assertIsNotNone(model) - def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): - inputs_dict = copy.deepcopy(inputs_dict) - if return_labels: - if model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING): - inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) - elif model_class in [ - *get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING), - *get_values(TF_MODEL_FOR_MASKED_LM_MAPPING), - ]: - inputs_dict["labels"] = tf.zeros( - (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32 - ) - elif model_class.__name__ == "TFLayoutLMForQuestionAnswering": - inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) - inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) - - return inputs_dict - def prepare_layoutlm_batch_inputs(): # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: diff --git a/tests/pipelines/test_pipelines_document_question_answering.py b/tests/pipelines/test_pipelines_document_question_answering.py new file mode 100644 index 00000000000000..7bf8ec99fb5922 --- /dev/null +++ b/tests/pipelines/test_pipelines_document_question_answering.py @@ -0,0 +1,280 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available +from transformers.pipelines import pipeline +from transformers.pipelines.document_question_answering import apply_tesseract +from transformers.testing_utils import ( + is_pipeline_test, + nested_simplify, + require_detectron2, + require_pytesseract, + require_tf, + require_torch, + require_vision, + slow, +) + +from .test_pipelines_common import ANY, PipelineTestCaseMeta + + +if is_vision_available(): + from PIL import Image + + from transformers.image_utils import load_image +else: + + class Image: + @staticmethod + def open(*args, **kwargs): + pass + + def load_image(_): + return None + + +# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, +# so we can expect it to be available. +INVOICE_URL = ( + "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png" +) + + +@is_pipeline_test +@require_torch +@require_vision +class DocumentQuestionAnsweringPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): + model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING + + @require_pytesseract + @require_vision + def get_test_pipeline(self, model, tokenizer, feature_extractor): + dqa_pipeline = pipeline( + "document-question-answering", model=model, tokenizer=tokenizer, feature_extractor=feature_extractor + ) + + image = INVOICE_URL + word_boxes = list(zip(*apply_tesseract(load_image(image), None, ""))) + question = "What is the placebo?" + examples = [ + { + "image": load_image(image), + "question": question, + }, + { + "image": image, + "question": question, + }, + { + "image": image, + "question": question, + "word_boxes": word_boxes, + }, + { + "image": None, + "question": question, + "word_boxes": word_boxes, + }, + ] + return dqa_pipeline, examples + + def run_pipeline_test(self, dqa_pipeline, examples): + outputs = dqa_pipeline(examples, top_k=2) + self.assertEqual( + outputs, + [ + [ + {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)}, + {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)}, + ] + ] + * 4, + ) + + @require_torch + @require_detectron2 + @require_pytesseract + def test_small_model_pt(self): + dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2") + image = INVOICE_URL + question = "How many cats are there?" + + expected_output = [ + { + "score": 0.0001, + "answer": "2312/2019 DUE DATE 26102/2019 ay DESCRIPTION UNIT PRICE", + "start": 38, + "end": 45, + }, + {"score": 0.0001, "answer": "2312/2019 DUE", "start": 38, "end": 39}, + ] + outputs = dqa_pipeline(image=image, question=question, top_k=2) + self.assertEqual(nested_simplify(outputs, decimals=4), expected_output) + + outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) + self.assertEqual(nested_simplify(outputs, decimals=4), expected_output) + + # This image does not detect ANY text in it, meaning layoutlmv2 should fail. + # Empty answer probably + image = "./tests/fixtures/tests_samples/COCO/000000039769.png" + outputs = dqa_pipeline(image=image, question=question, top_k=2) + self.assertEqual(outputs, []) + + # We can optionnally pass directly the words and bounding boxes + image = "./tests/fixtures/tests_samples/COCO/000000039769.png" + words = [] + boxes = [] + outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2) + self.assertEqual(outputs, []) + + # TODO: Enable this once hf-internal-testing/tiny-random-donut is implemented + # @require_torch + # def test_small_model_pt_donut(self): + # dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-donut") + # # dqa_pipeline = pipeline("document-question-answering", model="../tiny-random-donut") + # image = "https://templates.invoicehome.com/invoice-template-us-neat-750px.png" + # question = "How many cats are there?" + # + # outputs = dqa_pipeline(image=image, question=question, top_k=2) + # self.assertEqual( + # nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] + # ) + + @slow + @require_torch + @require_detectron2 + @require_pytesseract + def test_large_model_pt(self): + dqa_pipeline = pipeline( + "document-question-answering", + model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", + revision="9977165", + ) + image = INVOICE_URL + question = "What is the invoice number?" + + outputs = dqa_pipeline(image=image, question=question, top_k=2) + self.assertEqual( + nested_simplify(outputs, decimals=4), + [ + {"score": 0.9966, "answer": "us-001", "start": 15, "end": 15}, + {"score": 0.0009, "answer": "us-001", "start": 15, "end": 15}, + ], + ) + + outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) + self.assertEqual( + nested_simplify(outputs, decimals=4), + [ + {"score": 0.9966, "answer": "us-001", "start": 15, "end": 15}, + {"score": 0.0009, "answer": "us-001", "start": 15, "end": 15}, + ], + ) + + outputs = dqa_pipeline( + [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 + ) + self.assertEqual( + nested_simplify(outputs, decimals=4), + [ + [ + {"score": 0.9966, "answer": "us-001", "start": 15, "end": 15}, + {"score": 0.0009, "answer": "us-001", "start": 15, "end": 15}, + ], + ] + * 2, + ) + + @slow + @require_torch + @require_pytesseract + @require_vision + def test_large_model_pt_layoutlm(self): + tokenizer = AutoTokenizer.from_pretrained( + "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True + ) + dqa_pipeline = pipeline( + "document-question-answering", + model="impira/layoutlm-document-qa", + tokenizer=tokenizer, + revision="3dc6de3", + ) + image = INVOICE_URL + question = "What is the invoice number?" + + outputs = dqa_pipeline(image=image, question=question, top_k=2) + self.assertEqual( + nested_simplify(outputs, decimals=4), + [ + {"score": 0.9998, "answer": "us-001", "start": 15, "end": 15}, + {"score": 0.0, "answer": "INVOICE # us-001", "start": 13, "end": 15}, + ], + ) + + outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) + self.assertEqual( + nested_simplify(outputs, decimals=4), + [ + {"score": 0.9998, "answer": "us-001", "start": 15, "end": 15}, + {"score": 0.0, "answer": "INVOICE # us-001", "start": 13, "end": 15}, + ], + ) + + outputs = dqa_pipeline( + [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 + ) + self.assertEqual( + nested_simplify(outputs, decimals=4), + [ + [ + {"score": 0.9998, "answer": "us-001", "start": 15, "end": 15}, + {"score": 0.0, "answer": "INVOICE # us-001", "start": 13, "end": 15}, + ] + ] + * 2, + ) + + word_boxes = list(zip(*apply_tesseract(load_image(image), None, ""))) + + # This model should also work if `image` is set to None + outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2) + self.assertEqual( + nested_simplify(outputs, decimals=4), + [ + {"score": 0.9998, "answer": "us-001", "start": 15, "end": 15}, + {"score": 0.0, "answer": "INVOICE # us-001", "start": 13, "end": 15}, + ], + ) + + @slow + @require_torch + def test_large_model_pt_donut(self): + dqa_pipeline = pipeline( + "document-question-answering", + model="naver-clova-ix/donut-base-finetuned-docvqa", + tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"), + feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa", + ) + + image = INVOICE_URL + question = "What is the invoice number?" + outputs = dqa_pipeline(image=image, question=question, top_k=2) + self.assertEqual(nested_simplify(outputs, decimals=4), {"answer": "us-001"}) + + @require_tf + @unittest.skip("Document question answering not implemented in TF") + def test_small_model_tf(self): + pass diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index 05921334a6b8bb..6c4814c1a87274 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -89,6 +89,7 @@ MODEL_FOR_AUDIO_XVECTOR_MAPPING, MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, MODEL_FOR_CAUSAL_LM_MAPPING, + MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, @@ -172,7 +173,10 @@ def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): if return_labels: if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device) - elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING): + elif model_class in [ + *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING), + *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING), + ]: inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) @@ -542,7 +546,10 @@ def test_attention_outputs(self): if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Question Answering model returns start_logits and end_logits - if model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING): + if model_class in [ + *get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING), + *get_values(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING), + ]: correct_outlen += 1 # start_logits and end_logits instead of only 1 output if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned diff --git a/tests/test_modeling_tf_common.py b/tests/test_modeling_tf_common.py index f3608f4b225d86..0ef457c03523eb 100644 --- a/tests/test_modeling_tf_common.py +++ b/tests/test_modeling_tf_common.py @@ -61,6 +61,7 @@ from transformers import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, + TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, @@ -149,7 +150,10 @@ def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> d if return_labels: if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32) - elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING): + elif model_class in [ + *get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING), + *get_values(TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING), + ]: inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in [ From 6690ba3f4d036bc39bdf29ec98daf2c693442503 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Wed, 7 Sep 2022 19:46:14 +0200 Subject: [PATCH 238/539] pin TF 2.9.1 for self-hosted CIs (#18925) Co-authored-by: ydshieh --- docker/transformers-all-latest-gpu/Dockerfile | 2 +- docker/transformers-cpu/Dockerfile | 2 +- docker/transformers-gpu/Dockerfile | 2 +- docker/transformers-tensorflow-cpu/Dockerfile | 2 +- docker/transformers-tensorflow-gpu/Dockerfile | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docker/transformers-all-latest-gpu/Dockerfile b/docker/transformers-all-latest-gpu/Dockerfile index 4db6f51826f02b..502c9a61fd6c4a 100644 --- a/docker/transformers-all-latest-gpu/Dockerfile +++ b/docker/transformers-all-latest-gpu/Dockerfile @@ -32,7 +32,7 @@ RUN echo torch=$VERSION # TODO: We might need to specify proper versions that work with a specific torch version (especially for past CI). RUN [ "$PYTORCH" != "pre" ] && python3 -m pip install --no-cache-dir -U $VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/$CUDA || python3 -m pip install --no-cache-dir -U --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/$CUDA -RUN python3 -m pip install --no-cache-dir -U tensorflow +RUN python3 -m pip install --no-cache-dir -U tensorflow==2.9.1 RUN python3 -m pip uninstall -y flax jax # Use installed torch version for `torch-scatter` to avid to deal with PYTORCH='pre'. diff --git a/docker/transformers-cpu/Dockerfile b/docker/transformers-cpu/Dockerfile index 0d22039a481f0d..75a4f20a3b18fd 100644 --- a/docker/transformers-cpu/Dockerfile +++ b/docker/transformers-cpu/Dockerfile @@ -15,7 +15,7 @@ RUN apt update && \ RUN python3 -m pip install --no-cache-dir --upgrade pip && \ python3 -m pip install --no-cache-dir \ jupyter \ - tensorflow-cpu \ + tensorflow-cpu==2.9.1 \ torch WORKDIR /workspace diff --git a/docker/transformers-gpu/Dockerfile b/docker/transformers-gpu/Dockerfile index 0212eaa2a72b26..fc5c818438ba25 100644 --- a/docker/transformers-gpu/Dockerfile +++ b/docker/transformers-gpu/Dockerfile @@ -15,7 +15,7 @@ RUN apt update && \ RUN python3 -m pip install --no-cache-dir --upgrade pip && \ python3 -m pip install --no-cache-dir \ jupyter \ - tensorflow \ + tensorflow==2.9.1 \ torch RUN git clone https://github.com/NVIDIA/apex diff --git a/docker/transformers-tensorflow-cpu/Dockerfile b/docker/transformers-tensorflow-cpu/Dockerfile index e4af2b84bdeb34..dbc81acbbb25a4 100644 --- a/docker/transformers-tensorflow-cpu/Dockerfile +++ b/docker/transformers-tensorflow-cpu/Dockerfile @@ -15,7 +15,7 @@ RUN apt update && \ RUN python3 -m pip install --no-cache-dir --upgrade pip && \ python3 -m pip install --no-cache-dir \ mkl \ - tensorflow-cpu + tensorflow-cpu==2.9.1 WORKDIR /workspace COPY . transformers/ diff --git a/docker/transformers-tensorflow-gpu/Dockerfile b/docker/transformers-tensorflow-gpu/Dockerfile index a05ace7d08e268..a24faad8f2fcfb 100644 --- a/docker/transformers-tensorflow-gpu/Dockerfile +++ b/docker/transformers-tensorflow-gpu/Dockerfile @@ -12,7 +12,7 @@ RUN git clone https://github.com/huggingface/transformers && cd transformers && RUN python3 -m pip install --no-cache-dir -e ./transformers[dev-tensorflow,testing] # If set to nothing, will install the latest version -ARG TENSORFLOW='' +ARG TENSORFLOW='2.9.1' RUN [ ${#TENSORFLOW} -gt 0 ] && VERSION='tensorflow=='$TENSORFLOW'.*' || VERSION='tensorflow'; python3 -m pip install --no-cache-dir -U $VERSION RUN python3 -m pip uninstall -y torch flax From 639422187157bb19339e0ecdd6eeff7e1011a801 Mon Sep 17 00:00:00 2001 From: Yanming Wang Date: Wed, 7 Sep 2022 12:45:17 -0700 Subject: [PATCH 239/539] Fix XLA fp16 and bf16 error checking (#18913) * Fix XLA fp16 and bf16 error checking * Update src/transformers/training_args.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> --- src/transformers/training_args.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index 646e9343571b99..4cba5773420ff7 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -93,6 +93,15 @@ def get_int_from_env(env_keys, default): return default +def get_xla_device_type(device: "torch.device") -> Optional[str]: + """ + Returns the xla device type (CPU|GPU|TPU) or None if the device is a non-xla device. + """ + if is_torch_tpu_available(): + return xm.xla_real_devices([device])[0].split(":")[0] + return None + + class OptimizerNames(ExplicitEnum): """ Stores the acceptable string identifiers for optimizers. @@ -1108,7 +1117,7 @@ def __post_init__(self): self.framework == "pt" and is_torch_available() and (self.device.type != "cuda") - and not (self.device.type == "xla" and "GPU_NUM_DEVICES" in os.environ) + and (get_xla_device_type(self.device) != "GPU") and (self.fp16 or self.fp16_full_eval) ): raise ValueError( @@ -1120,7 +1129,7 @@ def __post_init__(self): self.framework == "pt" and is_torch_available() and (self.device.type != "cuda") - and not (self.device.type == "xla" and "GPU_NUM_DEVICES" in os.environ) + and (get_xla_device_type(self.device) != "GPU") and (self.device.type != "cpu") and (self.bf16 or self.bf16_full_eval) ): From 737f6ad1f78cccc0daf18e85d98695ea0a91c7d9 Mon Sep 17 00:00:00 2001 From: Colin Dean Date: Wed, 7 Sep 2022 16:33:03 -0400 Subject: [PATCH 240/539] Starts on a list of external deps required for dev (#18929) * Starts on a list of external deps required for dev I've found that I need to install MeCab manually on my AS Mac. * Generalizes OS nascent dependency list Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> --- CONTRIBUTING.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7dbc492f7ef72b..8696b2c5da29d7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -171,6 +171,14 @@ Follow these steps to start contributing ([supported Python versions](https://gi If you have already cloned that repo, you might need to `git pull` to get the most recent changes in the `datasets` library. + + Depending on your OS, you might need to install some external libraries, as well, if the `pip` installation fails. + + For macOS, you will likely need [MeCab](https://taku910.github.io/mecab/), which can be installed from Homebrew: + + ```bash + brew install mecab + ``` 5. Develop the features on your branch. From 6519150c315bdcd415bbd115cec11e839f3eb866 Mon Sep 17 00:00:00 2001 From: lewtun Date: Wed, 7 Sep 2022 22:42:46 +0200 Subject: [PATCH 241/539] Add image height and width to ONNX dynamic axes (#18915) --- src/transformers/models/beit/configuration_beit.py | 2 +- src/transformers/models/clip/configuration_clip.py | 2 +- src/transformers/models/convnext/configuration_convnext.py | 2 +- .../models/data2vec/configuration_data2vec_vision.py | 2 +- src/transformers/models/deit/configuration_deit.py | 2 +- src/transformers/models/detr/configuration_detr.py | 2 +- src/transformers/models/layoutlmv3/configuration_layoutlmv3.py | 2 +- src/transformers/models/levit/configuration_levit.py | 2 +- src/transformers/models/mobilevit/configuration_mobilevit.py | 2 +- src/transformers/models/resnet/configuration_resnet.py | 2 +- src/transformers/models/vit/configuration_vit.py | 2 +- 11 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/transformers/models/beit/configuration_beit.py b/src/transformers/models/beit/configuration_beit.py index 092f33ad85d383..e4cdb1eda2e9ae 100644 --- a/src/transformers/models/beit/configuration_beit.py +++ b/src/transformers/models/beit/configuration_beit.py @@ -194,7 +194,7 @@ class BeitOnnxConfig(OnnxConfig): def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ - ("pixel_values", {0: "batch", 1: "num_channels"}), + ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) diff --git a/src/transformers/models/clip/configuration_clip.py b/src/transformers/models/clip/configuration_clip.py index a118b179e4c09f..f70d6a6064c7cc 100644 --- a/src/transformers/models/clip/configuration_clip.py +++ b/src/transformers/models/clip/configuration_clip.py @@ -332,7 +332,7 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("input_ids", {0: "batch", 1: "sequence"}), - ("pixel_values", {0: "batch"}), + ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("attention_mask", {0: "batch", 1: "sequence"}), ] ) diff --git a/src/transformers/models/convnext/configuration_convnext.py b/src/transformers/models/convnext/configuration_convnext.py index 0b31da4370bfc5..8e435b1ed1d9af 100644 --- a/src/transformers/models/convnext/configuration_convnext.py +++ b/src/transformers/models/convnext/configuration_convnext.py @@ -117,7 +117,7 @@ class ConvNextOnnxConfig(OnnxConfig): def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ - ("pixel_values", {0: "batch", 1: "num_channels"}), + ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) diff --git a/src/transformers/models/data2vec/configuration_data2vec_vision.py b/src/transformers/models/data2vec/configuration_data2vec_vision.py index d6fc7871766faf..e9712e99994150 100644 --- a/src/transformers/models/data2vec/configuration_data2vec_vision.py +++ b/src/transformers/models/data2vec/configuration_data2vec_vision.py @@ -193,7 +193,7 @@ class Data2VecVisionOnnxConfig(OnnxConfig): def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ - ("pixel_values", {0: "batch", 1: "num_channels"}), + ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) diff --git a/src/transformers/models/deit/configuration_deit.py b/src/transformers/models/deit/configuration_deit.py index 1e9154eeca4af2..f937ae8451fa2a 100644 --- a/src/transformers/models/deit/configuration_deit.py +++ b/src/transformers/models/deit/configuration_deit.py @@ -137,7 +137,7 @@ class DeiTOnnxConfig(OnnxConfig): def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ - ("pixel_values", {0: "batch", 1: "num_channels"}), + ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) diff --git a/src/transformers/models/detr/configuration_detr.py b/src/transformers/models/detr/configuration_detr.py index 604a7dad0f42e9..57ea9119e58b28 100644 --- a/src/transformers/models/detr/configuration_detr.py +++ b/src/transformers/models/detr/configuration_detr.py @@ -223,7 +223,7 @@ class DetrOnnxConfig(OnnxConfig): def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ - ("pixel_values", {0: "batch", 1: "num_channels"}), + ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) diff --git a/src/transformers/models/layoutlmv3/configuration_layoutlmv3.py b/src/transformers/models/layoutlmv3/configuration_layoutlmv3.py index ddf86ceaa1a49b..c93bdf82b1708a 100644 --- a/src/transformers/models/layoutlmv3/configuration_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/configuration_layoutlmv3.py @@ -203,7 +203,7 @@ def inputs(self) -> Mapping[str, Mapping[int, str]]: ("input_ids", {0: "batch", 1: "sequence"}), ("attention_mask", {0: "batch", 1: "sequence"}), ("bbox", {0: "batch", 1: "sequence"}), - ("pixel_values", {0: "batch", 1: "sequence"}), + ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) else: diff --git a/src/transformers/models/levit/configuration_levit.py b/src/transformers/models/levit/configuration_levit.py index 69032a1faae6ad..38bf7c2d50f650 100644 --- a/src/transformers/models/levit/configuration_levit.py +++ b/src/transformers/models/levit/configuration_levit.py @@ -137,7 +137,7 @@ class LevitOnnxConfig(OnnxConfig): def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ - ("pixel_values", {0: "batch", 1: "num_channels"}), + ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) diff --git a/src/transformers/models/mobilevit/configuration_mobilevit.py b/src/transformers/models/mobilevit/configuration_mobilevit.py index e2b2c568f62d6f..83406c96d830c2 100644 --- a/src/transformers/models/mobilevit/configuration_mobilevit.py +++ b/src/transformers/models/mobilevit/configuration_mobilevit.py @@ -171,7 +171,7 @@ class MobileViTOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: - return OrderedDict([("pixel_values", {0: "batch", 1: "num_channels"})]) + return OrderedDict([("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"})]) @property def outputs(self) -> Mapping[str, Mapping[int, str]]: diff --git a/src/transformers/models/resnet/configuration_resnet.py b/src/transformers/models/resnet/configuration_resnet.py index 61a7fc86de3afa..e80832fdbc9e2b 100644 --- a/src/transformers/models/resnet/configuration_resnet.py +++ b/src/transformers/models/resnet/configuration_resnet.py @@ -105,7 +105,7 @@ class ResNetOnnxConfig(OnnxConfig): def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ - ("pixel_values", {0: "batch", 1: "num_channels"}), + ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) diff --git a/src/transformers/models/vit/configuration_vit.py b/src/transformers/models/vit/configuration_vit.py index a65790f30100d6..546f0a3bcf6234 100644 --- a/src/transformers/models/vit/configuration_vit.py +++ b/src/transformers/models/vit/configuration_vit.py @@ -135,7 +135,7 @@ class ViTOnnxConfig(OnnxConfig): def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ - ("pixel_values", {0: "batch", 1: "num_channels"}), + ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) From 90f6fe9155d2f477588d9ba2d7c697a1933e205a Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Wed, 7 Sep 2022 14:45:22 -0700 Subject: [PATCH 242/539] Skip some doctests in quicktour (#18927) * skip some code examples for doctests * make style * fix code snippet formatting * separate code snippet into two blocks --- docs/source/en/quicktour.mdx | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/docs/source/en/quicktour.mdx b/docs/source/en/quicktour.mdx index f1b3ca5bf0f688..3fcdb4fff22457 100644 --- a/docs/source/en/quicktour.mdx +++ b/docs/source/en/quicktour.mdx @@ -435,8 +435,8 @@ Depending on your task, you'll typically pass the following parameters to [`Trai 4. Your preprocessed train and test datasets: ```py - >>> train_dataset = dataset["train"] - >>> eval_dataset = dataset["eval"] + >>> train_dataset = dataset["train"] # doctest: +SKIP + >>> eval_dataset = dataset["eval"] # doctest: +SKIP ``` 5. A [`DataCollator`] to create a batch of examples from your dataset: @@ -459,13 +459,13 @@ Now gather all these classes in [`Trainer`]: ... eval_dataset=dataset["test"], ... tokenizer=tokenizer, ... data_collator=data_collator, -... ) +... ) # doctest: +SKIP ``` When you're ready, call [`~Trainer.train`] to start training: ```py ->>> trainer.train() +>>> trainer.train() # doctest: +SKIP ``` @@ -498,24 +498,29 @@ All models are a standard [`tf.keras.Model`](https://www.tensorflow.org/api_docs >>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased") ``` -3. Tokenize the dataset and pass it and the tokenizer to [`~TFPreTrainedModel.prepare_tf_dataset`]. You can also change the batch size and shuffle the dataset here if you'd like: +3. Create a function to tokenize the dataset: ```py >>> def tokenize_dataset(dataset): - ... return tokenizer(dataset["text"]) + ... return tokenizer(dataset["text"]) # doctest: +SKIP + ``` +4. Apply the tokenizer over the entire dataset with [`~datasets.Dataset.map`] and then pass the dataset and tokenizer to [`~TFPreTrainedModel.prepare_tf_dataset`]. You can also change the batch size and shuffle the dataset here if you'd like: - >>> dataset = dataset.map(tokenize_dataset) - >>> tf_dataset = model.prepare_tf_dataset(dataset, batch_size=16, shuffle=True, tokenizer=tokenizer) + ```py + >>> dataset = dataset.map(tokenize_dataset) # doctest: +SKIP + >>> tf_dataset = model.prepare_tf_dataset( + ... dataset, batch_size=16, shuffle=True, tokenizer=tokenizer + ... ) # doctest: +SKIP ``` -4. When you're ready, you can call `compile` and `fit` to start training: +5. When you're ready, you can call `compile` and `fit` to start training: ```py >>> from tensorflow.keras.optimizers import Adam >>> model.compile(optimizer=Adam(3e-5)) - >>> model.fit(dataset) + >>> model.fit(dataset) # doctest: +SKIP ``` ## What's next? From 9832ac7c736519fcfeedb88c8368cf0ab08b2b58 Mon Sep 17 00:00:00 2001 From: Devlee247 <64190071+Devlee247@users.noreply.github.com> Date: Thu, 8 Sep 2022 20:32:41 +0900 Subject: [PATCH 243/539] Fix LayoutXLM wrong link in README (#18932) * fix LayoutXLM wrong link in README * fix LayoutXLM worng link in index.mdx --- README.md | 2 +- README_ko.md | 2 +- README_zh-hans.md | 2 +- README_zh-hant.md | 2 +- docs/source/de/index.mdx | 2 +- docs/source/en/index.mdx | 2 +- docs/source/es/index.mdx | 2 +- docs/source/it/index.mdx | 2 +- docs/source/pt/index.mdx | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 2b728c212b3da2..a04113b2744218 100644 --- a/README.md +++ b/README.md @@ -312,7 +312,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou. 1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou. 1. **[LayoutLMv3](https://huggingface.co/docs/transformers/model_doc/layoutlmv3)** (from Microsoft Research Asia) released with the paper [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) by Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei. -1. **[LayoutXLM](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei. +1. **[LayoutXLM](https://huggingface.co/docs/transformers/model_doc/layoutxlm)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei. 1. **[LED](https://huggingface.co/docs/transformers/model_doc/led)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan. 1. **[LeViT](https://huggingface.co/docs/transformers/model_doc/levit)** (from Meta AI) released with the paper [LeViT: A Vision Transformer in ConvNet's Clothing for Faster Inference](https://arxiv.org/abs/2104.01136) by Ben Graham, Alaaeldin El-Nouby, Hugo Touvron, Pierre Stock, Armand Joulin, Hervé Jégou, Matthijs Douze. 1. **[Longformer](https://huggingface.co/docs/transformers/model_doc/longformer)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan. diff --git a/README_ko.md b/README_ko.md index a0df54a86c1a9b..a1226f4edc215b 100644 --- a/README_ko.md +++ b/README_ko.md @@ -264,7 +264,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou. 1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou. 1. **[LayoutLMv3](https://huggingface.co/docs/transformers/model_doc/layoutlmv3)** (from Microsoft Research Asia) released with the paper [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) by Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei. -1. **[LayoutXLM](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei. +1. **[LayoutXLM](https://huggingface.co/docs/transformers/model_doc/layoutxlm)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei. 1. **[LED](https://huggingface.co/docs/transformers/model_doc/led)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan. 1. **[LeViT](https://huggingface.co/docs/transformers/model_doc/levit)** (from Meta AI) released with the paper [LeViT: A Vision Transformer in ConvNet's Clothing for Faster Inference](https://arxiv.org/abs/2104.01136) by Ben Graham, Alaaeldin El-Nouby, Hugo Touvron, Pierre Stock, Armand Joulin, Hervé Jégou, Matthijs Douze. 1. **[Longformer](https://huggingface.co/docs/transformers/model_doc/longformer)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan. diff --git a/README_zh-hans.md b/README_zh-hans.md index 516f66ff96e633..785f0c02f33963 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -288,7 +288,7 @@ conda install -c huggingface transformers 1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (来自 Microsoft Research Asia) 伴随论文 [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) 由 Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou 发布。 1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (来自 Microsoft Research Asia) 伴随论文 [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) 由 Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou 发布。 1. **[LayoutLMv3](https://huggingface.co/docs/transformers/model_doc/layoutlmv3)** (来自 Microsoft Research Asia) 伴随论文 [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) 由 Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei 发布。 -1. **[LayoutXLM](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (来自 Microsoft Research Asia) 伴随论文 [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) 由 Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei 发布。 +1. **[LayoutXLM](https://huggingface.co/docs/transformers/model_doc/layoutxlm)** (来自 Microsoft Research Asia) 伴随论文 [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) 由 Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei 发布。 1. **[LED](https://huggingface.co/docs/transformers/model_doc/led)** (来自 AllenAI) 伴随论文 [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) 由 Iz Beltagy, Matthew E. Peters, Arman Cohan 发布。 1. **[LeViT](https://huggingface.co/docs/transformers/model_doc/levit)** (来自 Meta AI) 伴随论文 [LeViT: A Vision Transformer in ConvNet's Clothing for Faster Inference](https://arxiv.org/abs/2104.01136) 由 Ben Graham, Alaaeldin El-Nouby, Hugo Touvron, Pierre Stock, Armand Joulin, Hervé Jégou, Matthijs Douze 发布。 1. **[Longformer](https://huggingface.co/docs/transformers/model_doc/longformer)** (来自 AllenAI) 伴随论文 [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) 由 Iz Beltagy, Matthew E. Peters, Arman Cohan 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index 8b4c15dd13160d..fdbee86a38281d 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -300,7 +300,7 @@ conda install -c huggingface transformers 1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou. 1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou. 1. **[LayoutLMv3](https://huggingface.co/docs/transformers/model_doc/layoutlmv3)** (from Microsoft Research Asia) released with the paper [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) by Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei. -1. **[LayoutXLM](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei. +1. **[LayoutXLM](https://huggingface.co/docs/transformers/model_doc/layoutxlm)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei. 1. **[LED](https://huggingface.co/docs/transformers/model_doc/led)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan. 1. **[LeViT](https://huggingface.co/docs/transformers/model_doc/levit)** (from Meta AI) released with the paper [LeViT: A Vision Transformer in ConvNet's Clothing for Faster Inference](https://arxiv.org/abs/2104.01136) by Ben Graham, Alaaeldin El-Nouby, Hugo Touvron, Pierre Stock, Armand Joulin, Hervé Jégou, Matthijs Douze. 1. **[Longformer](https://huggingface.co/docs/transformers/model_doc/longformer)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan. diff --git a/docs/source/de/index.mdx b/docs/source/de/index.mdx index 815fd1724f4412..c1841c5b0d34ed 100644 --- a/docs/source/de/index.mdx +++ b/docs/source/de/index.mdx @@ -105,7 +105,7 @@ Die Bibliothek enthält derzeit JAX-, PyTorch- und TensorFlow-Implementierungen, 1. **[LayoutLM](model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou. 1. **[LayoutLMv2](model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou. 1. **[LayoutLMv3](model_doc/layoutlmv3)** (from Microsoft Research Asia) released with the paper [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) by Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei. -1. **[LayoutXLM](model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei. +1. **[LayoutXLM](model_doc/layoutxlm)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei. 1. **[LED](model_doc/led)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan. 1. **[LeViT](model_doc/levit)** (from Meta AI) released with the paper [LeViT: A Vision Transformer in ConvNet's Clothing for Faster Inference](https://arxiv.org/abs/2104.01136) by Ben Graham, Alaaeldin El-Nouby, Hugo Touvron, Pierre Stock, Armand Joulin, Hervé Jégou, Matthijs Douze. 1. **[Longformer](model_doc/longformer)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan. diff --git a/docs/source/en/index.mdx b/docs/source/en/index.mdx index 15ac6aa50aefd9..25408a0bf4470e 100644 --- a/docs/source/en/index.mdx +++ b/docs/source/en/index.mdx @@ -104,7 +104,7 @@ The documentation is organized into five sections: 1. **[LayoutLM](model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou. 1. **[LayoutLMv2](model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou. 1. **[LayoutLMv3](model_doc/layoutlmv3)** (from Microsoft Research Asia) released with the paper [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) by Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei. -1. **[LayoutXLM](model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei. +1. **[LayoutXLM](model_doc/layoutxlm)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei. 1. **[LED](model_doc/led)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan. 1. **[LeViT](model_doc/levit)** (from Meta AI) released with the paper [LeViT: A Vision Transformer in ConvNet's Clothing for Faster Inference](https://arxiv.org/abs/2104.01136) by Ben Graham, Alaaeldin El-Nouby, Hugo Touvron, Pierre Stock, Armand Joulin, Hervé Jégou, Matthijs Douze. 1. **[Longformer](model_doc/longformer)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan. diff --git a/docs/source/es/index.mdx b/docs/source/es/index.mdx index ad499cf1e7552e..65bf1c1a12c81e 100644 --- a/docs/source/es/index.mdx +++ b/docs/source/es/index.mdx @@ -92,7 +92,7 @@ La biblioteca actualmente contiene implementaciones de JAX, PyTorch y TensorFlow 1. **[ImageGPT](model_doc/imagegpt)** (de OpenAI) publicado con el paper [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt/) por Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever. 1. **[LayoutLM](model_doc/layoutlm)** (de Microsoft Research Asia) publicado con el paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) por Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou. 1. **[LayoutLMv2](model_doc/layoutlmv2)** (de Microsoft Research Asia) publicado con el paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) por Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou. -1. **[LayoutXLM](model_doc/layoutlmv2)** (de Microsoft Research Asia) publicado con el paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) por Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei. +1. **[LayoutXLM](model_doc/layoutxlm)** (de Microsoft Research Asia) publicado con el paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) por Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei. 1. **[LED](model_doc/led)** (de AllenAI) publicado con el paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) por Iz Beltagy, Matthew E. Peters, Arman Cohan. 1. **[Longformer](model_doc/longformer)** (de AllenAI) publicado con el paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) por Iz Beltagy, Matthew E. Peters, Arman Cohan. 1. **[LUKE](model_doc/luke)** (de Studio Ousia) publicado con el paper [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) por Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto. diff --git a/docs/source/it/index.mdx b/docs/source/it/index.mdx index 3ee8da15ed2d03..9d477dc68d9ea8 100644 --- a/docs/source/it/index.mdx +++ b/docs/source/it/index.mdx @@ -101,7 +101,7 @@ La libreria attualmente contiene implementazioni in JAX, PyTorch e TensorFlow, p 1. **[LayoutLM](model_doc/layoutlm)** (da Microsoft Research Asia) rilasciato con il paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) da Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou. 1. **[LayoutLMv2](model_doc/layoutlmv2)** (da Microsoft Research Asia) rilasciato con il paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) da Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou. 1. **[LayoutLMv3](model_doc/layoutlmv3)** (da Microsoft Research Asia) rilasciato con il paper [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) da Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei. -1. **[LayoutXLM](model_doc/layoutlmv2)** (da Microsoft Research Asia) rilasciato con il paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) da Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei. +1. **[LayoutXLM](model_doc/layoutlxlm)** (da Microsoft Research Asia) rilasciato con il paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) da Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei. 1. **[LED](model_doc/led)** (da AllenAI) rilasciato con il paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) da Iz Beltagy, Matthew E. Peters, Arman Cohan. 1. **[Longformer](model_doc/longformer)** (da AllenAI) rilasciato con il paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) da Iz Beltagy, Matthew E. Peters, Arman Cohan. 1. **[LUKE](model_doc/luke)** (da Studio Ousia) rilasciato con il paper [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) da Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto. diff --git a/docs/source/pt/index.mdx b/docs/source/pt/index.mdx index 8288bba3a11ce1..11ec31166a63dc 100644 --- a/docs/source/pt/index.mdx +++ b/docs/source/pt/index.mdx @@ -106,7 +106,7 @@ Atualmente a biblioteca contém implementações do PyTorch, TensorFlow e JAX, p 1. **[ImageGPT](model_doc/imagegpt)** (from OpenAI) released with the paper [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt/) by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever. 1. **[LayoutLM](model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou. 1. **[LayoutLMv2](model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou. -1. **[LayoutXLM](model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei. +1. **[LayoutXLM](model_doc/layoutxlm)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei. 1. **[LED](model_doc/led)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan. 1. **[Longformer](model_doc/longformer)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan. 1. **[LUKE](model_doc/luke)** (from Studio Ousia) released with the paper [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto. From bb6f6d53386bf2340eead6a8f9320ce61add3e96 Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Thu, 8 Sep 2022 14:50:30 +0200 Subject: [PATCH 244/539] Add X-CLIP (#18852) * First draft * Improve conversion script * Make vision encoder work * More improvements * Improve conversion script * Fix quality * Add MultiframeIntegrationTransformer * More improvements * Make MiT output work * Fix quality * Add prompts generator * Add tests * Fix some tests * Fix some more tests * Fix more tests * Improve conversion script * Fix model outputs * Fix more tests * Add XClipProcessor * Use processor in conversion script * Fix integration test * Update README, fix docs * Fix all tests * Add MIT output to XClipOutput * Create better variable names * Rename XClip to XCLIP * Extend conversion script * Add support for large models * Add support for 16 frame models * Add another model' * Fix module issue * Apply suggestions from code review * Add figure to docs * Fix CLIPProcessor issue * Apply suggestions from code review * Delete file * Convert more checkpoints * Convert last checkpoint * Update nielsr to microsoft --- README.md | 1 + README_ko.md | 1 + README_zh-hans.md | 1 + README_zh-hant.md | 1 + docs/source/en/_toctree.yml | 2 + docs/source/en/index.mdx | 2 + docs/source/en/model_doc/xclip.mdx | 69 + src/transformers/__init__.py | 35 +- src/transformers/models/__init__.py | 1 + .../models/auto/configuration_auto.py | 4 + .../models/auto/feature_extraction_auto.py | 1 + src/transformers/models/auto/modeling_auto.py | 1 + .../models/auto/processing_auto.py | 1 + .../models/auto/tokenization_auto.py | 1 + src/transformers/models/clip/__init__.py | 4 +- src/transformers/models/x_clip/__init__.py | 73 + .../models/x_clip/configuration_x_clip.py | 368 ++++ .../convert_x_clip_original_pytorch_to_hf.py | 386 +++++ .../models/x_clip/modeling_x_clip.py | 1497 +++++++++++++++++ .../models/x_clip/processing_x_clip.py | 109 ++ src/transformers/utils/dummy_pt_objects.py | 31 + .../utils/dummy_vision_objects.py | 7 - tests/models/x_clip/__init__.py | 0 tests/models/x_clip/test_modeling_x_clip.py | 672 ++++++++ utils/check_config_docstrings.py | 1 + utils/check_repo.py | 2 + 26 files changed, 3260 insertions(+), 11 deletions(-) create mode 100644 docs/source/en/model_doc/xclip.mdx create mode 100644 src/transformers/models/x_clip/__init__.py create mode 100644 src/transformers/models/x_clip/configuration_x_clip.py create mode 100644 src/transformers/models/x_clip/convert_x_clip_original_pytorch_to_hf.py create mode 100644 src/transformers/models/x_clip/modeling_x_clip.py create mode 100644 src/transformers/models/x_clip/processing_x_clip.py create mode 100644 tests/models/x_clip/__init__.py create mode 100644 tests/models/x_clip/test_modeling_x_clip.py diff --git a/README.md b/README.md index a04113b2744218..e832a113e488d7 100644 --- a/README.md +++ b/README.md @@ -383,6 +383,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino. 1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli. 1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei. +1. **[X-CLIP](https://huggingface.co/docs/transformers/main/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling. 1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li. 1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau. 1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou. diff --git a/README_ko.md b/README_ko.md index a1226f4edc215b..0566c31de2a41d 100644 --- a/README_ko.md +++ b/README_ko.md @@ -335,6 +335,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino. 1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli. 1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei. +1. **[X-CLIP](https://huggingface.co/docs/transformers/main/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling. 1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li. 1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau. 1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou. diff --git a/README_zh-hans.md b/README_zh-hans.md index 785f0c02f33963..a3bd914c09a05e 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -359,6 +359,7 @@ conda install -c huggingface transformers 1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (来自 Facebook AI) 伴随论文 [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) 由 Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino 发布。 1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (来自 Facebook AI) 伴随论文 [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) 由 Qiantong Xu, Alexei Baevski, Michael Auli 发布。 1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei. +1. **[X-CLIP](https://huggingface.co/docs/transformers/main/model_doc/xclip)** (来自 Microsoft Research) 伴随论文 [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) 由 Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling 发布。 1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li. 1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (来自 Facebook) 伴随论文 [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) 由 Guillaume Lample and Alexis Conneau 发布。 1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (来自 Microsoft Research) 伴随论文 [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) 由 Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index fdbee86a38281d..f544dd5b53d922 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -371,6 +371,7 @@ conda install -c huggingface transformers 1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino. 1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli. 1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei. +1. **[X-CLIP](https://huggingface.co/docs/transformers/main/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling. 1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li. 1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau. 1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou. diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 3353e82ad1e42c..6135a830181a4b 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -470,6 +470,8 @@ title: Vision Text Dual Encoder - local: model_doc/visual_bert title: VisualBERT + - local: model_doc/xclip + title: X-CLIP title: Multimodal models - isExpanded: false sections: diff --git a/docs/source/en/index.mdx b/docs/source/en/index.mdx index 25408a0bf4470e..6a1722cf33ffa5 100644 --- a/docs/source/en/index.mdx +++ b/docs/source/en/index.mdx @@ -175,6 +175,7 @@ The documentation is organized into five sections: 1. **[Wav2Vec2-Conformer](model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino. 1. **[Wav2Vec2Phoneme](model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli. 1. **[WavLM](model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei. +1. **[X-CLIP](model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling. 1. **[XGLM](model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li. 1. **[XLM](model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau. 1. **[XLM-ProphetNet](model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou. @@ -312,6 +313,7 @@ Flax), PyTorch, and/or TensorFlow. | Wav2Vec2 | ✅ | ❌ | ✅ | ✅ | ✅ | | Wav2Vec2-Conformer | ❌ | ❌ | ✅ | ❌ | ❌ | | WavLM | ❌ | ❌ | ✅ | ❌ | ❌ | +| X-CLIP | ❌ | ❌ | ✅ | ❌ | ❌ | | XGLM | ✅ | ✅ | ✅ | ✅ | ✅ | | XLM | ✅ | ❌ | ✅ | ✅ | ❌ | | XLM-ProphetNet | ✅ | ❌ | ✅ | ❌ | ❌ | diff --git a/docs/source/en/model_doc/xclip.mdx b/docs/source/en/model_doc/xclip.mdx new file mode 100644 index 00000000000000..4d572b6760071d --- /dev/null +++ b/docs/source/en/model_doc/xclip.mdx @@ -0,0 +1,69 @@ + + +# X-CLIP + +## Overview + +The X-CLIP model was proposed in [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling. +X-CLIP is a minimal extension of [CLIP](clip) for video. The model consists of a text encoder, a cross-frame vision encoder, a multi-frame integration Transformer, and a video-specific prompt generator. + +The abstract from the paper is the following: + +*Contrastive language-image pretraining has shown great success in learning visual-textual joint representation from web-scale data, demonstrating remarkable "zero-shot" generalization ability for various image tasks. However, how to effectively expand such new language-image pretraining methods to video domains is still an open problem. In this work, we present a simple yet effective approach that adapts the pretrained language-image models to video recognition directly, instead of pretraining a new model from scratch. More concretely, to capture the long-range dependencies of frames along the temporal dimension, we propose a cross-frame attention mechanism that explicitly exchanges information across frames. Such module is lightweight and can be plugged into pretrained language-image models seamlessly. Moreover, we propose a video-specific prompting scheme, which leverages video content information for generating discriminative textual prompts. Extensive experiments demonstrate that our approach is effective and can be generalized to different video recognition scenarios. In particular, under fully-supervised settings, our approach achieves a top-1 accuracy of 87.1% on Kinectics-400, while using 12 times fewer FLOPs compared with Swin-L and ViViT-H. In zero-shot experiments, our approach surpasses the current state-of-the-art methods by +7.6% and +14.9% in terms of top-1 accuracy under two popular protocols. In few-shot scenarios, our approach outperforms previous best methods by +32.1% and +23.1% when the labeled data is extremely limited.* + +Tips: + +- Usage of X-CLIP is identical to CLIP. + + + + X-CLIP architecture. Taken from the original paper. + +This model was contributed by [nielsr](https://huggingface.co/nielsr). +The original code can be found [here](https://github.com/microsoft/VideoX/tree/master/X-CLIP). + + +## XCLIPProcessor + +[[autodoc]] XCLIPProcessor + +## XCLIPConfig + +[[autodoc]] XCLIPConfig + - from_text_vision_configs + +## XCLIPTextConfig + +[[autodoc]] XCLIPTextConfig + +## XCLIPVisionConfig + +[[autodoc]] XCLIPVisionConfig + +## XCLIPModel + +[[autodoc]] XCLIPModel + - forward + - get_text_features + - get_video_features + +## XCLIPTextModel + +[[autodoc]] XCLIPTextModel + - forward + +## XCLIPVisionModel + +[[autodoc]] XCLIPVisionModel + - forward diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index e10e2ce0ba0a41..c94abb1dac6a3e 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -165,6 +165,7 @@ "models.clip": [ "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "CLIPConfig", + "CLIPProcessor", "CLIPTextConfig", "CLIPTokenizer", "CLIPVisionConfig", @@ -368,6 +369,13 @@ "WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig", ], + "models.x_clip": [ + "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", + "XCLIPConfig", + "XCLIPProcessor", + "XCLIPTextConfig", + "XCLIPVisionConfig", + ], "models.xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"], "models.xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMTokenizer"], "models.xlm_prophetnet": ["XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMProphetNetConfig"], @@ -641,7 +649,6 @@ _import_structure["image_utils"] = ["ImageFeatureExtractionMixin"] _import_structure["models.beit"].append("BeitFeatureExtractor") _import_structure["models.clip"].append("CLIPFeatureExtractor") - _import_structure["models.clip"].append("CLIPProcessor") _import_structure["models.convnext"].append("ConvNextFeatureExtractor") _import_structure["models.deit"].append("DeiTFeatureExtractor") _import_structure["models.detr"].append("DetrFeatureExtractor") @@ -988,6 +995,15 @@ "CLIPVisionModel", ] ) + _import_structure["models.x_clip"].extend( + [ + "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST", + "XCLIPModel", + "XCLIPPreTrainedModel", + "XCLIPTextModel", + "XCLIPVisionModel", + ] + ) _import_structure["models.convbert"].extend( [ "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST", @@ -3012,6 +3028,7 @@ from .models.clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, + CLIPProcessor, CLIPTextConfig, CLIPTokenizer, CLIPVisionConfig, @@ -3189,6 +3206,13 @@ from .models.wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer from .models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM from .models.wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig + from .models.x_clip import ( + XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, + XCLIPConfig, + XCLIPProcessor, + XCLIPTextConfig, + XCLIPVisionConfig, + ) from .models.xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig from .models.xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMTokenizer from .models.xlm_prophetnet import XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMProphetNetConfig @@ -3428,7 +3452,7 @@ else: from .image_utils import ImageFeatureExtractionMixin from .models.beit import BeitFeatureExtractor - from .models.clip import CLIPFeatureExtractor, CLIPProcessor + from .models.clip import CLIPFeatureExtractor from .models.convnext import ConvNextFeatureExtractor from .models.deit import DeiTFeatureExtractor from .models.detr import DetrFeatureExtractor @@ -4499,6 +4523,13 @@ WavLMModel, WavLMPreTrainedModel, ) + from .models.x_clip import ( + XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, + XCLIPModel, + XCLIPPreTrainedModel, + XCLIPTextModel, + XCLIPVisionModel, + ) from .models.xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel from .models.xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, diff --git a/src/transformers/models/__init__.py b/src/transformers/models/__init__.py index 05e0d6ea132079..9cc42c8d1240f0 100644 --- a/src/transformers/models/__init__.py +++ b/src/transformers/models/__init__.py @@ -151,6 +151,7 @@ wav2vec2_phoneme, wav2vec2_with_lm, wavlm, + x_clip, xglm, xlm, xlm_prophetnet, diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index c387b89530d916..fe50973ac71696 100644 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -144,6 +144,7 @@ ("wav2vec2", "Wav2Vec2Config"), ("wav2vec2-conformer", "Wav2Vec2ConformerConfig"), ("wavlm", "WavLMConfig"), + ("xclip", "XCLIPConfig"), ("xglm", "XGLMConfig"), ("xlm", "XLMConfig"), ("xlm-prophetnet", "XLMProphetNetConfig"), @@ -259,6 +260,7 @@ ("vit_mae", "VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("wav2vec2", "WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("wav2vec2-conformer", "WAV2VEC2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("xclip", "X_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("xglm", "XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("xlm", "XLM_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("xlm-prophetnet", "XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP"), @@ -408,6 +410,7 @@ ("wav2vec2-conformer", "Wav2Vec2-Conformer"), ("wav2vec2_phoneme", "Wav2Vec2Phoneme"), ("wavlm", "WavLM"), + ("xclip", "X-CLIP"), ("xglm", "XGLM"), ("xlm", "XLM"), ("xlm-prophetnet", "XLM-ProphetNet"), @@ -428,6 +431,7 @@ ("data2vec-text", "data2vec"), ("data2vec-vision", "data2vec"), ("donut-swin", "donut"), + ("xclip", "x_clip"), ] ) diff --git a/src/transformers/models/auto/feature_extraction_auto.py b/src/transformers/models/auto/feature_extraction_auto.py index 3058aaa4334a20..625b79db06494e 100644 --- a/src/transformers/models/auto/feature_extraction_auto.py +++ b/src/transformers/models/auto/feature_extraction_auto.py @@ -75,6 +75,7 @@ ("vit_mae", "ViTFeatureExtractor"), ("wav2vec2", "Wav2Vec2FeatureExtractor"), ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"), + ("xclip", "CLIPFeatureExtractor"), ("yolos", "YolosFeatureExtractor"), ] ) diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index 1cb0ae44db0105..79b58d49383d0c 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -138,6 +138,7 @@ ("wav2vec2", "Wav2Vec2Model"), ("wav2vec2-conformer", "Wav2Vec2ConformerModel"), ("wavlm", "WavLMModel"), + ("xclip", "XCLIPModel"), ("xglm", "XGLMModel"), ("xlm", "XLMModel"), ("xlm-prophetnet", "XLMProphetNetModel"), diff --git a/src/transformers/models/auto/processing_auto.py b/src/transformers/models/auto/processing_auto.py index c6f4fd98316a44..7eff84c5d56714 100644 --- a/src/transformers/models/auto/processing_auto.py +++ b/src/transformers/models/auto/processing_auto.py @@ -58,6 +58,7 @@ ("wav2vec2-conformer", "Wav2Vec2Processor"), ("wav2vec2_with_lm", "Wav2Vec2ProcessorWithLM"), ("wavlm", "Wav2Vec2Processor"), + ("xclip", "CLIPProcessor"), ] ) diff --git a/src/transformers/models/auto/tokenization_auto.py b/src/transformers/models/auto/tokenization_auto.py index 8ece13b79fe3fa..9eb802b1fb1d86 100644 --- a/src/transformers/models/auto/tokenization_auto.py +++ b/src/transformers/models/auto/tokenization_auto.py @@ -253,6 +253,7 @@ ("wav2vec2", ("Wav2Vec2CTCTokenizer", None)), ("wav2vec2-conformer", ("Wav2Vec2CTCTokenizer", None)), ("wav2vec2_phoneme", ("Wav2Vec2PhonemeCTCTokenizer", None)), + ("xclip", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)), ( "xglm", ( diff --git a/src/transformers/models/clip/__init__.py b/src/transformers/models/clip/__init__.py index 932130f8d5fdf9..637d78b0da7994 100644 --- a/src/transformers/models/clip/__init__.py +++ b/src/transformers/models/clip/__init__.py @@ -36,6 +36,7 @@ "CLIPTextConfig", "CLIPVisionConfig", ], + "processing_clip": ["CLIPProcessor"], "tokenization_clip": ["CLIPTokenizer"], } @@ -54,7 +55,6 @@ pass else: _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"] - _import_structure["processing_clip"] = ["CLIPProcessor"] try: if not is_torch_available(): @@ -108,6 +108,7 @@ CLIPTextConfig, CLIPVisionConfig, ) + from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: @@ -125,7 +126,6 @@ pass else: from .feature_extraction_clip import CLIPFeatureExtractor - from .processing_clip import CLIPProcessor try: if not is_torch_available(): diff --git a/src/transformers/models/x_clip/__init__.py b/src/transformers/models/x_clip/__init__.py new file mode 100644 index 00000000000000..10d848b7bc4e65 --- /dev/null +++ b/src/transformers/models/x_clip/__init__.py @@ -0,0 +1,73 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available + + +_import_structure = { + "configuration_x_clip": [ + "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", + "XCLIPConfig", + "XCLIPTextConfig", + "XCLIPVisionConfig", + ], + "processing_x_clip": ["XCLIPProcessor"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_x_clip"] = [ + "XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST", + "XCLIPModel", + "XCLIPPreTrainedModel", + "XCLIPTextModel", + "XCLIPVisionModel", + ] + +if TYPE_CHECKING: + from .configuration_x_clip import ( + XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, + XCLIPConfig, + XCLIPTextConfig, + XCLIPVisionConfig, + ) + from .processing_x_clip import XCLIPProcessor + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_x_clip import ( + XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, + XCLIPModel, + XCLIPPreTrainedModel, + XCLIPTextModel, + XCLIPVisionModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/src/transformers/models/x_clip/configuration_x_clip.py b/src/transformers/models/x_clip/configuration_x_clip.py new file mode 100644 index 00000000000000..30f9214eb8b4b5 --- /dev/null +++ b/src/transformers/models/x_clip/configuration_x_clip.py @@ -0,0 +1,368 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" X-CLIP model configuration""" + +import copy +import os +from typing import Union + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + +XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "microsoft/xclip-base-patch32": "https://huggingface.co/microsoft/xclip-base-patch32/resolve/main/config.json", +} + + +class XCLIPTextConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`XCLIPModel`]. It is used to instantiate an X-CLIP + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the X-CLIP + [microsoft/xclip-base-patch32](https://huggingface.co/microsoft/xclip-base-patch32) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 49408): + Vocabulary size of the X-CLIP text model. Defines the number of different tokens that can be represented by + the `inputs_ids` passed when calling [`XCLIPModel`]. + hidden_size (`int`, *optional*, defaults to 512): + Dimensionality of the encoder layers and the pooler layer. + intermediate_size (`int`, *optional*, defaults to 2048): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 8): + Number of attention heads for each attention layer in the Transformer encoder. + max_position_embeddings (`int`, *optional*, defaults to 77): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported. + layer_norm_eps (`float`, *optional*, defaults to 1e-5): + The epsilon used by the layer normalization layers. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + dropout (`float`, *optional*, defaults to 0.0): + The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + initializer_factor (`float``, *optional*, defaults to 1): + A factor for initializing all weight matrices (should be kept to 1, used internally for initialization + testing). + + Example: + + ```python + >>> from transformers import XCLIPTextModel, XCLIPTextConfig + + >>> # Initializing a XCLIPTextModel with microsoft/xclip-base-patch32 style configuration + >>> configuration = XCLIPTextConfig() + + >>> # Initializing a XCLIPTextConfig from the microsoft/xclip-base-patch32 style configuration + >>> model = XCLIPTextModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + model_type = "xclip_text_model" + + def __init__( + self, + vocab_size=49408, + hidden_size=512, + intermediate_size=2048, + num_hidden_layers=12, + num_attention_heads=8, + max_position_embeddings=77, + hidden_act="quick_gelu", + layer_norm_eps=0.00001, + dropout=0.0, + attention_dropout=0.0, + initializer_range=0.02, + initializer_factor=1.0, + pad_token_id=1, + bos_token_id=0, + eos_token_id=2, + **kwargs + ): + super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.dropout = dropout + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.max_position_embeddings = max_position_embeddings + self.layer_norm_eps = layer_norm_eps + self.hidden_act = hidden_act + self.initializer_range = initializer_range + self.initializer_factor = initializer_factor + self.attention_dropout = attention_dropout + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": + + config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) + + # get the text config dict if we are loading from XCLIPConfig + if config_dict.get("model_type") == "xclip": + config_dict = config_dict["text_config"] + + if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: + logger.warning( + f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " + f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." + ) + + return cls.from_dict(config_dict, **kwargs) + + +class XCLIPVisionConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`XCLIPModel`]. It is used to instantiate an X-CLIP + model according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the X-CLIP + [microsoft/xclip-base-patch32](https://huggingface.co/microsoft/xclip-base-patch32) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + mit_hidden_size (`int`, *optional*, defaults to 512): + Dimensionality of the encoder layers of the Multiframe Integration Transformer (MIT). + mit_intermediate_size (`int`, *optional*, defaults to 2048): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Multiframe Integration Transformer + (MIT). + mit_num_hidden_layers (`int`, *optional*, defaults to 1): + Number of hidden layers in the Multiframe Integration Transformer (MIT). + mit_num_attention_heads (`int`, *optional*, defaults to 8): + Number of attention heads for each attention layer in the Multiframe Integration Transformer (MIT). + image_size (`int`, *optional*, defaults to 224): + The size (resolution) of each image. + patch_size (`int`, *optional*, defaults to 32): + The size (resolution) of each patch. + hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"`, `"gelu_new"` and ``"quick_gelu"` are supported. + layer_norm_eps (`float`, *optional*, defaults to 1e-5): + The epsilon used by the layer normalization layers. + dropout (`float`, *optional*, defaults to 0.0): + The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + initializer_factor (`float``, *optional*, defaults to 1): + A factor for initializing all weight matrices (should be kept to 1, used internally for initialization + testing). + drop_path_rate (`float`, *optional*, defaults to 0.0): + Stochastic depth rate. + + Example: + + ```python + >>> from transformers import XCLIPVisionModel, XCLIPVisionConfig + + >>> # Initializing a XCLIPVisionModel with microsoft/xclip-base-patch32 style configuration + >>> configuration = XCLIPVisionConfig() + + >>> # Initializing a XCLIPVisionModel model from the microsoft/xclip-base-patch32 style configuration + >>> model = XCLIPVisionModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "xclip_vision_model" + + def __init__( + self, + hidden_size=768, + intermediate_size=3072, + num_hidden_layers=12, + num_attention_heads=12, + mit_hidden_size=512, + mit_intermediate_size=2048, + mit_num_hidden_layers=1, + mit_num_attention_heads=8, + num_channels=3, + image_size=224, + patch_size=32, + num_frames=8, + hidden_act="quick_gelu", + layer_norm_eps=0.00001, + dropout=0.0, + attention_dropout=0.0, + initializer_range=0.02, + initializer_factor=1.0, + drop_path_rate=0.0, + **kwargs + ): + super().__init__(**kwargs) + + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.dropout = dropout + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.mit_hidden_size = mit_hidden_size + self.mit_intermediate_size = mit_intermediate_size + self.mit_num_hidden_layers = mit_num_hidden_layers + self.mit_num_attention_heads = mit_num_attention_heads + self.num_channels = num_channels + self.patch_size = patch_size + self.num_frames = num_frames + self.image_size = image_size + self.initializer_range = initializer_range + self.initializer_factor = initializer_factor + self.attention_dropout = attention_dropout + self.layer_norm_eps = layer_norm_eps + self.hidden_act = hidden_act + self.drop_path_rate = drop_path_rate + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig": + + config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) + + # get the vision config dict if we are loading from XCLIPConfig + if config_dict.get("model_type") == "xclip": + config_dict = config_dict["vision_config"] + + if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: + logger.warning( + f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " + f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." + ) + + return cls.from_dict(config_dict, **kwargs) + + +class XCLIPConfig(PretrainedConfig): + r""" + [`XCLIPConfig`] is the configuration class to store the configuration of a [`XCLIPModel`]. It is used to + instantiate X-CLIP model according to the specified arguments, defining the text model and vision model configs. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + text_config_dict (`dict`, *optional*): + Dictionary of configuration options used to initialize [`XCLIPTextConfig`]. + vision_config_dict (`dict`, *optional*): + Dictionary of configuration options used to initialize [`XCLIPVisionConfig`]. + projection_dim (`int`, *optional*, defaults to 512): + Dimentionality of text and vision projection layers. + prompt_layers (`int`, *optional*, defaults to 2): + Number of layers in the video specific prompt generator. + prompt_alpha (`float`, *optional*, defaults to 0.1): + Alpha value to use in the video specific prompt generator. + prompt_hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`): + The non-linear activation function (function or string) in the video specific prompt generator. If string, + `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported. + prompt_num_attention_heads (`int`, *optional*, defaults to 8): + Number of attention heads in the cross-attention of the video specific prompt generator. + prompt_attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout probability for the attention layers in the video specific prompt generator. + prompt_projection_dropout (`float`, *optional*, defaults to 0.0): + The dropout probability for the projection layers in the video specific prompt generator. + logit_scale_init_value (`float`, *optional*, defaults to 2.6592): + The inital value of the *logit_scale* parameter. Default is used as per the original XCLIP implementation. + kwargs (*optional*): + Dictionary of keyword arguments. + """ + + model_type = "xclip" + is_composition = True + + def __init__( + self, + text_config_dict=None, + vision_config_dict=None, + projection_dim=512, + prompt_layers=2, + prompt_alpha=0.1, + prompt_hidden_act="quick_gelu", + prompt_num_attention_heads=8, + prompt_attention_dropout=0.0, + prompt_projection_dropout=0.0, + logit_scale_init_value=2.6592, + **kwargs + ): + super().__init__(text_config_dict=text_config_dict, vision_config_dict=vision_config_dict, **kwargs) + + if text_config_dict is None: + text_config_dict = {} + logger.info("text_config_dict is None. Initializing the XCLIPTextConfig with default values.") + + if vision_config_dict is None: + vision_config_dict = {} + logger.info("vision_config_dict is None. initializing the XCLIPVisionConfig with default values.") + + self.text_config = XCLIPTextConfig(**text_config_dict) + self.vision_config = XCLIPVisionConfig(**vision_config_dict) + + self.projection_dim = projection_dim + self.prompt_layers = prompt_layers + self.prompt_alpha = prompt_alpha + self.prompt_hidden_act = prompt_hidden_act + self.prompt_num_attention_heads = prompt_num_attention_heads + self.prompt_attention_dropout = prompt_attention_dropout + self.prompt_projection_dropout = prompt_projection_dropout + self.logit_scale_init_value = logit_scale_init_value + self.initializer_factor = 1.0 + + @classmethod + def from_text_vision_configs(cls, text_config: XCLIPTextConfig, vision_config: XCLIPVisionConfig, **kwargs): + r""" + Instantiate a [`XCLIPConfig`] (or a derived class) from xclip text model configuration and xclip vision model + configuration. + + Returns: + [`XCLIPConfig`]: An instance of a configuration object + """ + + return cls(text_config_dict=text_config.to_dict(), vision_config_dict=vision_config.to_dict(), **kwargs) + + def to_dict(self): + """ + Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`]. + + Returns: + `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance, + """ + output = copy.deepcopy(self.__dict__) + output["text_config"] = self.text_config.to_dict() + output["vision_config"] = self.vision_config.to_dict() + output["model_type"] = self.__class__.model_type + return output diff --git a/src/transformers/models/x_clip/convert_x_clip_original_pytorch_to_hf.py b/src/transformers/models/x_clip/convert_x_clip_original_pytorch_to_hf.py new file mode 100644 index 00000000000000..2f5364f440986f --- /dev/null +++ b/src/transformers/models/x_clip/convert_x_clip_original_pytorch_to_hf.py @@ -0,0 +1,386 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse + +import numpy as np +import torch + +import gdown +from huggingface_hub import hf_hub_download +from transformers import ( + CLIPTokenizer, + CLIPTokenizerFast, + VideoMAEFeatureExtractor, + XCLIPConfig, + XCLIPModel, + XCLIPProcessor, + XCLIPTextConfig, + XCLIPVisionConfig, +) + + +def get_xclip_config(model_name, num_frames): + text_config = XCLIPTextConfig() + + # derive patch size from model name + start_idx = model_name.find("patch") + patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2]) + vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames) + + if "large" in model_name: + text_config.hidden_size = 768 + text_config.intermediate_size = 3072 + text_config.num_attention_heads = 12 + + vision_config.hidden_size = 1024 + vision_config.intermediate_size = 4096 + vision_config.num_attention_heads = 16 + vision_config.num_hidden_layers = 24 + vision_config.mit_hidden_size = 768 + vision_config.mit_intermediate_size = 3072 + + if model_name == "xclip-large-patch14-16-frames": + vision_config.image_size = 336 + + config = XCLIPConfig.from_text_vision_configs(text_config, vision_config) + + if "large" in model_name: + config.projection_dim = 768 + + return config + + +def rename_key(name): + # text encoder + if name == "token_embedding.weight": + name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight") + if name == "positional_embedding": + name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight") + if "ln_1" in name: + name = name.replace("ln_1", "layer_norm1") + if "ln_2" in name: + name = name.replace("ln_2", "layer_norm2") + if "c_fc" in name: + name = name.replace("c_fc", "fc1") + if "c_proj" in name: + name = name.replace("c_proj", "fc2") + if name.startswith("transformer.resblocks"): + name = name.replace("transformer.resblocks", "text_model.encoder.layers") + if "attn.out_proj" in name and "message" not in name: + name = name.replace("attn.out_proj", "self_attn.out_proj") + if "ln_final" in name: + name = name.replace("ln_final", "text_model.final_layer_norm") + # visual encoder + if name == "visual.class_embedding": + name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding") + if name == "visual.positional_embedding": + name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight") + if name.startswith("visual.transformer.resblocks"): + name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers") + if "visual.conv1" in name: + name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding") + if "visual.ln_pre" in name: + name = name.replace("visual.ln_pre", "vision_model.pre_layernorm") + if "visual.ln_post" in name: + name = name.replace("visual.ln_post", "vision_model.post_layernorm") + if "visual.proj" in name: + name = name.replace("visual.proj", "visual_projection.weight") + if "text_projection" in name: + name = name.replace("text_projection", "text_projection.weight") + # things on top + if "prompts_visual_proj" in name: + name = name.replace("prompts_visual_proj", "prompts_visual_projection") + if "prompts_visual_ln" in name: + name = name.replace("prompts_visual_ln", "prompts_visual_layernorm") + # mit + if name == "mit.positional_embedding": + name = name.replace("positional", "position") + if name.startswith("mit.resblocks"): + name = name.replace("mit.resblocks", "mit.encoder.layers") + # prompts generator + if name.startswith("prompts_generator.norm"): + name = name.replace("prompts_generator.norm", "prompts_generator.layernorm") + + return name + + +def convert_state_dict(orig_state_dict, config): + for key in orig_state_dict.copy().keys(): + val = orig_state_dict.pop(key) + + if "attn.in_proj" in key: + key_split = key.split(".") + if key.startswith("visual"): + layer_num = key_split[3] + dim = config.vision_config.hidden_size + if "message_attn" in key: + if "weight" in key: + orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[ + :dim, : + ] + orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[ + dim : dim * 2, : + ] + orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[ + -dim:, : + ] + else: + orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[ + :dim + ] + orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[ + dim : dim * 2 + ] + orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[ + -dim: + ] + else: + if "weight" in key: + orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[ + :dim, : + ] + orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[ + dim : dim * 2, : + ] + orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[ + -dim:, : + ] + else: + orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim] + orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[ + dim : dim * 2 + ] + orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:] + elif key.startswith("mit"): + layer_num = key_split[2] + dim = config.vision_config.mit_hidden_size + if "weight" in key: + orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :] + orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :] + orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :] + else: + orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim] + orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2] + orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:] + else: + layer_num = key_split[2] + dim = config.text_config.hidden_size + if "weight" in key: + orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :] + orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[ + dim : dim * 2, : + ] + orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :] + else: + orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim] + orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[ + dim : dim * 2 + ] + orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:] + else: + new_key_name = rename_key(key) + if new_key_name in ["visual_projection.weight", "text_projection.weight"]: + val = val.T + orig_state_dict[new_key_name] = val + + return orig_state_dict + + +def prepare_video(num_frames): + if num_frames == 8: + filename = "eating_spaghetti_8_frames.npy" + elif num_frames == 16: + filename = "eating_spaghetti.npy" + elif num_frames == 32: + filename = "eating_spaghetti_32_frames.npy" + file = hf_hub_download( + repo_id="datasets/hf-internal-testing/spaghetti-video", + filename=filename, + ) + video = np.load(file) + return list(video) + + +def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False): + + model_to_url = { + # fully supervised kinetics-400 checkpoints + "xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth", + "xclip-base-patch32-16-frames": ( + "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth" + ), + "xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth", + "xclip-base-patch16-16-frames": ( + "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth" + ), + "xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb", + "xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f", + # fully supervised kinetics-600 checkpoints + "xclip-base-patch16-kinetics-600": ( + "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth" + ), + "xclip-base-patch16-kinetics-600-16-frames": ( + "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth" + ), + "xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be", + # few shot + "xclip-base-patch16-hmdb-2-shot": ( + "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth" + ), + "xclip-base-patch16-hmdb-4-shot": ( + "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth" + ), + "xclip-base-patch16-hmdb-8-shot": ( + "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth" + ), + "xclip-base-patch16-hmdb-16-shot": ( + "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth" + ), + "xclip-base-patch16-ucf-2-shot": ( + "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth" + ), + "xclip-base-patch16-ucf-4-shot": ( + "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth" + ), + "xclip-base-patch16-ucf-8-shot": ( + "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth" + ), + "xclip-base-patch16-ucf-16-shot": ( + "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth" + ), + # zero shot + "xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth", + } + + checkpoint_url = model_to_url[model_name] + num_frames = 8 + if "16-frames" in model_name: + num_frames = 16 + elif "shot" in model_name: + num_frames = 32 + + config = get_xclip_config(model_name, num_frames) + model = XCLIPModel(config) + model.eval() + + if "drive" in checkpoint_url: + output = "pytorch_model.bin" + gdown.cached_download(checkpoint_url, output, quiet=False) + state_dict = torch.load(output, map_location="cpu")["model"] + else: + state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"] + + state_dict = convert_state_dict(state_dict, config) + + model = XCLIPModel(config) + missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) + assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] + model.eval() + + size = 336 if model_name == "xclip-large-patch14-16-frames" else 224 + feature_extractor = VideoMAEFeatureExtractor(size=size) + slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32") + fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32") + processor = XCLIPProcessor(feature_extractor=feature_extractor, tokenizer=fast_tokenizer) + + video = prepare_video(num_frames) + inputs = processor( + text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True + ) + + print("Shape of pixel values:", inputs.pixel_values.shape) + + with torch.no_grad(): + outputs = model(**inputs) + + # Verify outputs + logits_per_video = outputs.logits_per_video + probs = logits_per_video.softmax(dim=1) + print("Probs:", probs) + # kinetics-400 + if model_name == "xclip-base-patch32": + expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]]) + elif model_name == "xclip-base-patch32-16-frames": + expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]]) + elif model_name == "xclip-base-patch16": + expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]]) + elif model_name == "xclip-base-patch16-16-frames": + expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]]) + elif model_name == "xclip-large-patch14": + expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]]) + elif model_name == "xclip-large-patch14-16-frames": + expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]]) + # kinetics-600 + elif model_name == "xclip-base-patch16-kinetics-600": + expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]]) + elif model_name == "xclip-base-patch16-kinetics-600-16-frames": + expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]]) + elif model_name == "xclip-large-patch14-kinetics-600": + expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]]) + # few shot + elif model_name == "xclip-base-patch16-hmdb-2-shot": + expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]]) + elif model_name == "xclip-base-patch16-hmdb-4-shot": + expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]]) + elif model_name == "xclip-base-patch16-hmdb-8-shot": + expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]]) + elif model_name == "xclip-base-patch16-hmdb-16-shot": + expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]]) + elif model_name == "xclip-base-patch16-ucf-2-shot": + expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]]) + elif model_name == "xclip-base-patch16-ucf-4-shot": + expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]]) + elif model_name == "xclip-base-patch16-ucf-8-shot": + expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]]) + elif model_name == "xclip-base-patch16-ucf-16-shot": + expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]]) + # zero shot + elif model_name == "xclip-base-patch16-zero-shot": + expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]]) + else: + raise ValueError(f"Model name {model_name} not supported") + assert torch.allclose(probs, expected_probs, atol=1e-3) + print("Looks ok!") + + if pytorch_dump_folder_path is not None: + print(f"Saving model {model_name} to {pytorch_dump_folder_path}") + model.save_pretrained(pytorch_dump_folder_path) + + if push_to_hub: + print("Pushing model, processor and slow tokenizer files to the hub...") + model.push_to_hub(model_name, organization="nielsr") + processor.push_to_hub(model_name, organization="nielsr") + slow_tokenizer.push_to_hub(model_name, organization="nielsr") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--model_name", + default="xclip-base-patch32", + type=str, + help="Name of the model.", + ) + parser.add_argument( + "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." + ) + parser.add_argument( + "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." + ) + + args = parser.parse_args() + convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) diff --git a/src/transformers/models/x_clip/modeling_x_clip.py b/src/transformers/models/x_clip/modeling_x_clip.py new file mode 100644 index 00000000000000..00ae9d720602ac --- /dev/null +++ b/src/transformers/models/x_clip/modeling_x_clip.py @@ -0,0 +1,1497 @@ +# coding=utf-8 +# Copyright 2022 Microsoft Research and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch X-CLIP model.""" + + +from copy import copy +from dataclasses import dataclass +from typing import Any, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn + +from ...activations import ACT2FN +from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling +from ...modeling_utils import PreTrainedModel +from ...utils import ( + ModelOutput, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_x_clip import XCLIPConfig, XCLIPTextConfig, XCLIPVisionConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "microsoft/xclip-base-patch32" + +XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "microsoft/xclip-base-patch32", + # See all X-CLIP models at https://huggingface.co/models?filter=x-clip +] + + +# Copied from transformers.models.bart.modeling_bart._expand_mask +def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. + """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + + +# contrastive loss function, adapted from +# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/clip.html +def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: + return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) + + +# Copied from transformers.models.clip.modeling_clip.clip_loss with clip->x_clip +def x_clip_loss(similarity: torch.Tensor) -> torch.Tensor: + caption_loss = contrastive_loss(similarity) + image_loss = contrastive_loss(similarity.t()) + return (caption_loss + image_loss) / 2.0 + + +@dataclass +class XCLIPOutput(ModelOutput): + """ + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): + Contrastive loss for video-text similarity. + logits_per_video (`torch.FloatTensor` of shape `(video_batch_size, text_batch_size)`): + The scaled dot product scores between `video_embeds` and `text_embeds`. This represents the video-text + similarity scores. + logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, video_batch_size)`): + The scaled dot product scores between `text_embeds` and `video_embeds`. This represents the text-video + similarity scores. + text_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): + The text embeddings obtained by applying the projection layer to the pooled output of [`XCLIPTextModel`]. + video_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): + The video embeddings obtained by applying the projection layer to the pooled output of + [`XCLIPVisionModel`]. + text_model_output (`BaseModelOutputWithPooling`): + The output of the [`XCLIPTextModel`]. + vision_model_output (`BaseModelOutputWithPooling`): + The output of the [`XCLIPVisionModel`]. + mit_output (`BaseModelOutputWithPooling`): + The output of `XCLIPMultiframeIntegrationTransformer` (MIT for short). + """ + + loss: Optional[torch.FloatTensor] = None + logits_per_video: torch.FloatTensor = None + logits_per_text: torch.FloatTensor = None + text_embeds: torch.FloatTensor = None + video_embeds: torch.FloatTensor = None + text_model_output: BaseModelOutputWithPooling = None + vision_model_output: BaseModelOutputWithPooling = None + mit_output: BaseModelOutputWithPooling = None + + def to_tuple(self) -> Tuple[Any]: + return tuple( + self[k] + if k not in ["text_model_output", "vision_model_output", "mit_output"] + else getattr(self, k).to_tuple() + for k in self.keys() + ) + + +# Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->XCLIP +class XCLIPVisionEmbeddings(nn.Module): + def __init__(self, config: XCLIPVisionConfig): + super().__init__() + self.config = config + self.embed_dim = config.hidden_size + self.image_size = config.image_size + self.patch_size = config.patch_size + + self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) + + self.patch_embedding = nn.Conv2d( + in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False + ) + + self.num_patches = (self.image_size // self.patch_size) ** 2 + self.num_positions = self.num_patches + 1 + self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) + self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1))) + + def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: + batch_size = pixel_values.shape[0] + patch_embeds = self.patch_embedding(pixel_values) # shape = [*, width, grid, grid] + patch_embeds = patch_embeds.flatten(2).transpose(1, 2) + + class_embeds = self.class_embedding.expand(batch_size, 1, -1) + embeddings = torch.cat([class_embeds, patch_embeds], dim=1) + embeddings = embeddings + self.position_embedding(self.position_ids) + return embeddings + + +# Copied from transformers.models.clip.modeling_clip.CLIPTextEmbeddings with CLIP->XCLIP +class XCLIPTextEmbeddings(nn.Module): + def __init__(self, config: XCLIPTextConfig): + super().__init__() + embed_dim = config.hidden_size + + self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) + self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim) + + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) + + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + ) -> torch.Tensor: + seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] + + if position_ids is None: + position_ids = self.position_ids[:, :seq_length] + + if inputs_embeds is None: + inputs_embeds = self.token_embedding(input_ids) + + position_embeddings = self.position_embedding(position_ids) + embeddings = inputs_embeds + position_embeddings + + return embeddings + + +# Copied from transformers.models.clip.modeling_clip.CLIPAttention with CLIP->XCLIP +class XCLIPAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config): + super().__init__() + self.config = config + self.embed_dim = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.embed_dim // self.num_heads + if self.head_dim * self.num_heads != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" + f" {self.num_heads})." + ) + self.scale = self.head_dim**-0.5 + self.dropout = config.attention_dropout + + self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) + self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) + self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) + self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + causal_attention_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + bsz, tgt_len, embed_dim = hidden_states.size() + + # get query proj + query_states = self.q_proj(hidden_states) * self.scale + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) + key_states = key_states.view(*proj_shape) + value_states = value_states.view(*proj_shape) + + src_len = key_states.size(1) + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {attn_weights.size()}" + ) + + # apply the causal_attention_mask first + if causal_attention_mask is not None: + if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" + f" {causal_attention_mask.size()}" + ) + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if output_attentions: + # this operation is a bit akward, but it's required to + # make sure that attn_weights keeps its gradient. + # In order to do so, attn_weights have to reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped + + +# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->XCLIP +class XCLIPMLP(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.activation_fn = ACT2FN[config.hidden_act] + self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) + self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.fc1(hidden_states) + hidden_states = self.activation_fn(hidden_states) + hidden_states = self.fc2(hidden_states) + return hidden_states + + +# Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->XCLIP +class XCLIPEncoderLayer(nn.Module): + def __init__(self, config: XCLIPConfig): + super().__init__() + self.embed_dim = config.hidden_size + self.self_attn = XCLIPAttention(config) + self.layer_norm1 = nn.LayerNorm(self.embed_dim) + self.mlp = XCLIPMLP(config) + self.layer_norm2 = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: torch.Tensor, + causal_attention_mask: torch.Tensor, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.FloatTensor]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + `(config.encoder_attention_heads,)`. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + """ + residual = hidden_states + + hidden_states = self.layer_norm1(hidden_states) + hidden_states, attn_weights = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + causal_attention_mask=causal_attention_mask, + output_attentions=output_attentions, + ) + hidden_states = residual + hidden_states + + residual = hidden_states + hidden_states = self.layer_norm2(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +# Copied from transformers.models.beit.modeling_beit.drop_path +def drop_path(input, drop_prob: float = 0.0, training: bool = False): + """ + Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). + + Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, + however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... + See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the + layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the + argument. + """ + if drop_prob == 0.0 or not training: + return input + keep_prob = 1 - drop_prob + shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets + random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) + random_tensor.floor_() # binarize + output = input.div(keep_prob) * random_tensor + return output + + +# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->XCLIP +class XCLIPDropPath(nn.Module): + """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).""" + + def __init__(self, drop_prob: Optional[float] = None) -> None: + super().__init__() + self.drop_prob = drop_prob + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return drop_path(x, self.drop_prob, self.training) + + def extra_repr(self) -> str: + return "p={}".format(self.drop_prob) + + +class XCLIPVisionEncoderLayer(nn.Module): + """ + This corresponds to the `CrossFramelAttentionBlock` class in the original implementation. + """ + + def __init__(self, config: XCLIPConfig): + super().__init__() + self.num_frames = config.num_frames + self.embed_dim = config.hidden_size + + self.message_fc = nn.Linear(self.embed_dim, self.embed_dim) + self.message_ln = nn.LayerNorm(self.embed_dim) + self.message_attn = XCLIPAttention(config) + + self.drop_path = XCLIPDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity() + + self.self_attn = XCLIPAttention(config) + self.layer_norm1 = nn.LayerNorm(self.embed_dim) + self.mlp = XCLIPMLP(config) + self.layer_norm2 = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: torch.Tensor, + causal_attention_mask: torch.Tensor, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.FloatTensor]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + `(config.encoder_attention_heads,)`. + causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Causal mask for the text model. Mask values selected in `[0, 1]`: + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + [What are attention masks?](../glossary#attention-mask) + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + """ + batch_time, seq_length, hidden_size = hidden_states.size() + batch_size = batch_time // self.num_frames + msg_token = self.message_fc(hidden_states[:, 0, :]) + msg_token = msg_token.view(batch_size, self.num_frames, hidden_size) + + msg_token = msg_token + self.drop_path(self.message_attn(self.message_ln(msg_token))[0]) + # add dummy sequence dimension + msg_token = msg_token.view(-1, 1, hidden_size) + + hidden_states = torch.cat([hidden_states, msg_token], dim=1) + + residual = hidden_states + + hidden_states = self.layer_norm1(hidden_states) + hidden_states, attn_weights = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + causal_attention_mask=causal_attention_mask, + output_attentions=output_attentions, + ) + hidden_states = residual + hidden_states + + hidden_states = hidden_states[:, :seq_length, :] + + residual = hidden_states + hidden_states = self.layer_norm2(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +class XCLIPPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = XCLIPConfig + base_model_prefix = "x_clip" + supports_gradient_checkpointing = True + _keys_to_ignore_on_load_missing = [r"position_ids"] + + def _init_weights(self, module): + """Initialize the weights""" + factor = self.config.initializer_factor + if isinstance(module, XCLIPTextEmbeddings): + module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) + module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) + elif isinstance(module, XCLIPVisionEmbeddings): + factor = self.config.initializer_factor + nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor) + nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor) + nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor) + elif isinstance(module, XCLIPAttention): + factor = self.config.initializer_factor + in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor + out_proj_std = (module.embed_dim**-0.5) * factor + nn.init.normal_(module.q_proj.weight, std=in_proj_std) + nn.init.normal_(module.k_proj.weight, std=in_proj_std) + nn.init.normal_(module.v_proj.weight, std=in_proj_std) + nn.init.normal_(module.out_proj.weight, std=out_proj_std) + elif isinstance(module, XCLIPMLP): + factor = self.config.initializer_factor + in_proj_std = ( + (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor + ) + fc_std = (2 * module.config.hidden_size) ** -0.5 * factor + nn.init.normal_(module.fc1.weight, std=fc_std) + nn.init.normal_(module.fc2.weight, std=in_proj_std) + elif isinstance(module, XCLIPModel): + factor = self.config.initializer_factor + nn.init.normal_( + module.text_projection.weight, + std=module.text_embed_dim**-0.5 * factor, + ) + nn.init.normal_( + module.visual_projection.weight, + std=module.vision_embed_dim**-0.5 * factor, + ) + nn.init.normal_(module.prompts_visual_projection, mean=0.0, std=module.vision_embed_dim**-0.5 * factor) + elif isinstance(module, XCLIPMultiframeIntegrationTransformer): + nn.init.normal_(module.position_embedding, std=self.config.initializer_factor) + + if isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_factor) + if module.bias is not None: + module.bias.data.zero_() + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, (XCLIPEncoder, XCLIPVisionEncoder)): + module.gradient_checkpointing = value + + +X_CLIP_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it + as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`XCLIPConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +X_CLIP_TEXT_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`CLIPTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +X_CLIP_VISION_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using + [`CLIPFeatureExtractor`]. See [`CLIPFeatureExtractor.__call__`] for details. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +X_CLIP_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`CLIPTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using + [`CLIPFeatureExtractor`]. See [`CLIPFeatureExtractor.__call__`] for details. + return_loss (`bool`, *optional*): + Whether or not to return the contrastive loss. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +# Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->XCLIP +class XCLIPEncoder(nn.Module): + """ + Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a + [`XCLIPEncoderLayer`]. + + Args: + config: XCLIPConfig + """ + + def __init__(self, config: XCLIPConfig): + super().__init__() + self.config = config + self.layers = nn.ModuleList([XCLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + inputs_embeds, + attention_mask: Optional[torch.Tensor] = None, + causal_attention_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutput]: + r""" + Args: + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Causal mask for the text model. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + hidden_states = inputs_embeds + for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(encoder_layer), + hidden_states, + attention_mask, + causal_attention_mask, + ) + else: + layer_outputs = encoder_layer( + hidden_states, + attention_mask, + causal_attention_mask, + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions + ) + + +class XCLIPTextTransformer(nn.Module): + def __init__(self, config: XCLIPTextConfig): + super().__init__() + self.config = config + embed_dim = config.hidden_size + self.embeddings = XCLIPTextEmbeddings(config) + self.encoder = XCLIPEncoder(config) + self.final_layer_norm = nn.LayerNorm(embed_dim) + + @add_start_docstrings_to_model_forward(X_CLIP_TEXT_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=XCLIPTextConfig) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPooling]: + r""" + Returns: + + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is None: + raise ValueError("You have to specify either input_ids") + + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + + hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids) + + batch_size, seq_len = input_shape + # X_CLIP's text model uses causal mask, prepare it here. + # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324 + causal_attention_mask = self._build_causal_attention_mask(batch_size, seq_len, hidden_states.dtype).to( + hidden_states.device + ) + # expand attention_mask + if attention_mask is not None: + # [batch_size, seq_len] -> [batch_size, 1, tgt_seq_len, src_seq_len] + attention_mask = _expand_mask(attention_mask, hidden_states.dtype) + + encoder_outputs = self.encoder( + inputs_embeds=hidden_states, + attention_mask=attention_mask, + causal_attention_mask=causal_attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + last_hidden_state = encoder_outputs[0] + last_hidden_state = self.final_layer_norm(last_hidden_state) + + # text_embeds.shape = [batch_size, sequence_length, transformer.width] + # take features from the eot embedding (eot_token is the highest number in each sequence) + pooled_output = last_hidden_state[torch.arange(last_hidden_state.shape[0]), input_ids.argmax(dim=-1)] + + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPooling( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + def _build_causal_attention_mask(self, batch_size, seq_len, dtype): + # lazily create causal attention mask, with full attention between the vision tokens + # pytorch uses additive attention mask; fill with -inf + mask = torch.empty(batch_size, seq_len, seq_len, dtype=dtype) + mask.fill_(torch.tensor(torch.finfo(dtype).min)) + mask.triu_(1) # zero out the lower diagonal + mask = mask.unsqueeze(1) # expand mask + return mask + + +class XCLIPTextModel(XCLIPPreTrainedModel): + config_class = XCLIPTextConfig + + def __init__(self, config: XCLIPTextConfig): + super().__init__(config) + self.text_model = XCLIPTextTransformer(config) + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self) -> nn.Module: + return self.text_model.embeddings.token_embedding + + def set_input_embeddings(self, value): + self.text_model.embeddings.token_embedding = value + + @add_start_docstrings_to_model_forward(X_CLIP_TEXT_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=XCLIPTextConfig) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPooling]: + r""" + Returns: + + Examples: + + ```python + >>> from transformers import CLIPTokenizer, XCLIPTextModel + + >>> model = XCLIPTextModel.from_pretrained("microsoft/xclip-base-patch32") + >>> tokenizer = CLIPTokenizer.from_pretrained("microsoft/xclip-base-patch32") + + >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") + + >>> outputs = model(**inputs) + >>> last_hidden_state = outputs.last_hidden_state + >>> pooled_output = outputs.pooler_output # pooled (EOS token) states + ```""" + return self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + +class XCLIPVisionEncoder(nn.Module): + """ + Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a + [`XCLIPVisionEncoderLayer`]. + + Args: + config: XCLIPConfig + """ + + def __init__(self, config: XCLIPConfig): + super().__init__() + self.config = config + self.layers = nn.ModuleList([XCLIPVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + inputs_embeds, + attention_mask: Optional[torch.Tensor] = None, + causal_attention_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutput]: + r""" + Args: + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Causal mask for the text model. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + hidden_states = inputs_embeds + for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(encoder_layer), + hidden_states, + attention_mask, + causal_attention_mask, + ) + else: + layer_outputs = encoder_layer( + hidden_states, + attention_mask, + causal_attention_mask, + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions + ) + + +class XCLIPVisionTransformer(nn.Module): + """ + This corresponds to the `CrossFrameCommunicationTransformer` class in the original implementation. + """ + + def __init__(self, config: XCLIPVisionConfig): + super().__init__() + self.config = config + embed_dim = config.hidden_size + + self.embeddings = XCLIPVisionEmbeddings(config) + self.pre_layernorm = nn.LayerNorm(embed_dim) + self.encoder = XCLIPVisionEncoder(config) + self.post_layernorm = nn.LayerNorm(embed_dim) + + @add_start_docstrings_to_model_forward(X_CLIP_VISION_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=XCLIPVisionConfig) + def forward( + self, + pixel_values: torch.FloatTensor, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPooling]: + r""" + Returns: + + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + hidden_states = self.embeddings(pixel_values) + hidden_states = self.pre_layernorm(hidden_states) + + encoder_outputs = self.encoder( + inputs_embeds=hidden_states, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + last_hidden_state = encoder_outputs[0] + pooled_output = last_hidden_state[:, 0, :] + pooled_output = self.post_layernorm(pooled_output) + + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPooling( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + +class XCLIPVisionModel(XCLIPPreTrainedModel): + config_class = XCLIPVisionConfig + main_input_name = "pixel_values" + + def __init__(self, config: XCLIPVisionConfig): + super().__init__(config) + self.vision_model = XCLIPVisionTransformer(config) + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self) -> nn.Module: + return self.vision_model.embeddings.patch_embedding + + @add_start_docstrings_to_model_forward(X_CLIP_VISION_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=XCLIPVisionConfig) + def forward( + self, + pixel_values: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPooling]: + r""" + Returns: + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import XCLIPProcessor, XCLIPVisionModel + + >>> model = XCLIPVisionModel.from_pretrained("microsoft/xclip-base-patch32") + >>> processor = XCLIPProcessor.from_pretrained("microsoft/xclip-base-patch32") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor(images=image, return_tensors="pt") + + >>> outputs = model(**inputs) + >>> last_hidden_state = outputs.last_hidden_state + >>> pooled_output = outputs.pooler_output # pooled CLS states + ```""" + return self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + +class XCLIPMultiframeIntegrationTransformer(nn.Module): + """ + This corresponds to the `MultiframeIntegrationTransformer` class in the original implementation. + """ + + def __init__(self, config: XCLIPVisionConfig): + super().__init__() + + self.position_embedding = nn.Parameter(torch.empty(1, config.num_frames, config.hidden_size)) + self.encoder = XCLIPEncoder(config) + + def forward( + self, + hidden_states, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutput]: + residual = hidden_states + + # add position embeddings + hidden_states = hidden_states + self.position_embedding + + encoder_outputs = self.encoder( + inputs_embeds=hidden_states, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + last_hidden_state = encoder_outputs[0] + + last_hidden_state = last_hidden_state.type(hidden_states.dtype) + residual + + pooled_output = last_hidden_state.mean(dim=1, keepdim=False) + + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPooling( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + +class XCLIPCrossAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config): + super().__init__() + self.num_heads = config.prompt_num_attention_heads + + dim = config.projection_dim + head_dim = dim // self.num_heads + self.scale = head_dim**-0.5 + + self.q_proj = nn.Linear(dim, dim, bias=False) + self.k_proj = nn.Linear(dim, dim, bias=False) + self.v_proj = nn.Linear(dim, dim, bias=False) + + self.attn_drop = nn.Dropout(config.prompt_attention_dropout) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(config.prompt_projection_dropout) + + def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): + return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward(self, queries, keys, values): + """Input shape: Batch x Time x Channel""" + batch_size, query_seq_len, hidden_size = queries.shape + batch_size, key_seq_len, hidden_size = keys.shape + queries = ( + self.q_proj(queries) + .reshape(batch_size, query_seq_len, self.num_heads, hidden_size // self.num_heads) + .permute(0, 2, 1, 3) + ) + keys = ( + self.k_proj(keys) + .reshape(batch_size, key_seq_len, self.num_heads, hidden_size // self.num_heads) + .permute(0, 2, 1, 3) + ) + values = ( + self.v_proj(values) + .reshape(batch_size, key_seq_len, self.num_heads, hidden_size // self.num_heads) + .permute(0, 2, 1, 3) + ) + + attn = (queries @ keys.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ values).transpose(1, 2).reshape(batch_size, query_seq_len, hidden_size) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class PromptGeneratorLayer(nn.Module): + def __init__(self, config): + super().__init__() + + embed_dim = config.projection_dim + self.cross_attn = XCLIPCrossAttention(config) + self.norm1 = nn.LayerNorm(embed_dim) + self.norm3 = nn.LayerNorm(embed_dim) + self.mlp = nn.Sequential( + nn.Linear(embed_dim, embed_dim * 4), + ACT2FN[config.prompt_hidden_act], + nn.Dropout(config.prompt_attention_dropout), + nn.Linear(embed_dim * 4, embed_dim), + ) + + def forward(self, x, visual): + x = x + self.cross_attn(self.norm1(x), visual, visual) + x = x + self.mlp(self.norm3(x)) + return x + + +class XCLIPPromptGenerator(nn.Module): + """This corresponds to the `VideoSpecificPrompt` class in the original implementation.""" + + def __init__(self, config): + super().__init__() + embed_dim = config.projection_dim + self.layernorm = nn.LayerNorm(embed_dim) + self.decoder = nn.ModuleList([PromptGeneratorLayer(config) for _ in range(config.prompt_layers)]) + self.alpha = nn.Parameter(torch.ones(embed_dim) * config.prompt_alpha) + + def forward(self, text, visual): + visual = self.layernorm(visual) + for layer in self.decoder: + text = layer(text, visual) + + return self.alpha * text + + +@add_start_docstrings(X_CLIP_START_DOCSTRING) +class XCLIPModel(XCLIPPreTrainedModel): + config_class = XCLIPConfig + + def __init__(self, config: XCLIPConfig): + super().__init__(config) + + if not isinstance(config.text_config, XCLIPTextConfig): + raise ValueError( + "config.text_config is expected to be of type XCLIPTextConfig but is of type" + f" {type(config.text_config)}." + ) + + if not isinstance(config.vision_config, XCLIPVisionConfig): + raise ValueError( + "config.vision_config is expected to be of type XCLIPVisionConfig but is of type" + f" {type(config.vision_config)}." + ) + + text_config = config.text_config + vision_config = config.vision_config + + self.projection_dim = config.projection_dim + self.text_embed_dim = text_config.hidden_size + self.vision_embed_dim = vision_config.hidden_size + + self.text_model = XCLIPTextTransformer(text_config) + self.vision_model = XCLIPVisionTransformer(vision_config) + + self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) + self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) + self.logit_scale = nn.Parameter(torch.ones([]) * self.config.logit_scale_init_value) + + self.prompts_visual_layernorm = nn.LayerNorm(self.vision_embed_dim) + self.prompts_visual_projection = nn.Parameter(torch.randn(self.vision_embed_dim, self.projection_dim)) + + mit_config = copy(vision_config) + mit_config.hidden_size = vision_config.mit_hidden_size + mit_config.intermediate_size = vision_config.mit_intermediate_size + mit_config.num_hidden_layers = vision_config.mit_num_hidden_layers + mit_config.num_attention_heads = vision_config.mit_num_attention_heads + self.mit = XCLIPMultiframeIntegrationTransformer(mit_config) + + self.prompts_generator = XCLIPPromptGenerator(config) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(X_CLIP_TEXT_INPUTS_DOCSTRING) + def get_text_features( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> torch.FloatTensor: + r""" + Returns: + text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by + applying the projection layer to the pooled output of [`XCLIPTextModel`]. + + Examples: + + ```python + >>> from transformers import CLIPTokenizer, XCLIPModel + + >>> model = XCLIPModel.from_pretrained("microsoft/xclip-base-patch32") + >>> tokenizer = CLIPTokenizer.from_pretrained("microsoft/xclip-base-patch32") + + >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") + >>> text_features = model.get_text_features(**inputs) + ```""" + # Use X_CLIP model's config for some fields (if specified) instead of those of vision & text components. + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + text_outputs = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + text_embeds = text_outputs[1] + text_embeds = self.text_projection(text_embeds) + + return text_embeds + + @add_start_docstrings_to_model_forward(X_CLIP_VISION_INPUTS_DOCSTRING) + def get_video_features( + self, + pixel_values: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> torch.FloatTensor: + r""" + Returns: + video_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The video embeddings obtained by + applying the projection layer to the pooled output of [`XCLIPVisionModel`] and + [`XCLIPMultiframeIntegrationTransformer`]. + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import XCLIPProcessor, XCLIPModel + + >>> model = XCLIPModel.from_pretrained("microsoft/xclip-base-patch32") + >>> processor = XCLIPProcessor.from_pretrained("microsoft/xclip-base-patch32") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor(images=image, return_tensors="pt") + + >>> video_features = model.get_video_features(**inputs) + ```""" + # Use X_CLIP model's config for some fields (if specified) instead of those of vision & text components. + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + batch_size, num_frames, num_channels, height, width = pixel_values.shape + pixel_values = pixel_values.reshape(-1, num_channels, height, width) + + vision_outputs = self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + video_embeds = vision_outputs[1] + video_embeds = self.visual_projection(video_embeds) + + cls_features = video_embeds.view(batch_size, num_frames, -1) + + mit_outputs = self.mit( + cls_features, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + video_embeds = mit_outputs[1] + + return video_embeds + + @add_start_docstrings_to_model_forward(X_CLIP_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=XCLIPOutput, config_class=XCLIPConfig) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + pixel_values: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + return_loss: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, XCLIPOutput]: + r""" + Returns: + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import XCLIPProcessor, XCLIPModel + + >>> model = XCLIPModel.from_pretrained("microsoft/xclip-base-patch32") + >>> processor = XCLIPProcessor.from_pretrained("microsoft/xclip-base-patch32") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor( + ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True + ... ) + + >>> outputs = model(**inputs) + >>> logits_per_video = outputs.logits_per_video # this is the video-text similarity score + >>> probs = logits_per_video.softmax(dim=1) # we can take the softmax to get the label probabilities + ```""" + # Use X_CLIP model's config for some fields (if specified) instead of those of vision & text components. + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + batch_size, num_frames, num_channels, height, width = pixel_values.shape + pixel_values = pixel_values.reshape(-1, num_channels, height, width) + + vision_outputs = self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + video_embeds = vision_outputs[1] + video_embeds = self.visual_projection(video_embeds) + + cls_features = video_embeds.view(batch_size, num_frames, -1) + + mit_outputs = self.mit( + cls_features, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + video_embeds = mit_outputs[1] + + img_features = vision_outputs[0][:, 1:, :] + img_features = self.prompts_visual_layernorm(img_features) + img_features = img_features @ self.prompts_visual_projection + img_features = img_features.view(batch_size, num_frames, -1, video_embeds.shape[-1]) + img_features = img_features.mean(dim=1, keepdim=False) + + text_outputs = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + text_embeds = text_outputs[1] + text_embeds = self.text_projection(text_embeds) + + text_embeds = text_embeds.unsqueeze(0).expand(batch_size, -1, -1) + text_embeds = text_embeds + self.prompts_generator(text_embeds, img_features) + + # normalized features + video_embeds = video_embeds / video_embeds.norm(p=2, dim=-1, keepdim=True) + text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) + + # cosine similarity as logits + logit_scale = self.logit_scale.exp() + logits_per_video = torch.einsum("bd,bkd->bk", video_embeds, logit_scale * text_embeds) + logits_per_text = logits_per_video.T + + loss = None + if return_loss: + loss = x_clip_loss(logits_per_text) + + if not return_dict: + output = (logits_per_video, logits_per_text, text_embeds, video_embeds, text_outputs, vision_outputs) + return ((loss,) + output) if loss is not None else output + + return XCLIPOutput( + loss=loss, + logits_per_video=logits_per_video, + logits_per_text=logits_per_text, + text_embeds=text_embeds, + video_embeds=video_embeds, + text_model_output=text_outputs, + vision_model_output=vision_outputs, + mit_output=mit_outputs, + ) diff --git a/src/transformers/models/x_clip/processing_x_clip.py b/src/transformers/models/x_clip/processing_x_clip.py new file mode 100644 index 00000000000000..7e694a3e339ecd --- /dev/null +++ b/src/transformers/models/x_clip/processing_x_clip.py @@ -0,0 +1,109 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Image/Text processor class for XCLIP +""" +from ...processing_utils import ProcessorMixin +from ...tokenization_utils_base import BatchEncoding + + +class XCLIPProcessor(ProcessorMixin): + r""" + Constructs an X-CLIP processor which wraps a VideoMAE feature extractor and a CLIP tokenizer into a single + processor. + + [`XCLIPProcessor`] offers all the functionalities of [`VideoMAEFeatureExtractor`] and [`CLIPTokenizerFast`]. See + the [`~XCLIPProcessor.__call__`] and [`~XCLIPProcessor.decode`] for more information. + + Args: + feature_extractor ([`VideoMAEFeatureExtractor`]): + The feature extractor is a required input. + tokenizer ([`CLIPTokenizerFast`]): + The tokenizer is a required input. + """ + feature_extractor_class = "VideoMAEFeatureExtractor" + tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast") + + def __init__(self, feature_extractor, tokenizer): + super().__init__(feature_extractor, tokenizer) + self.current_processor = self.feature_extractor + + def __call__(self, text=None, videos=None, return_tensors=None, **kwargs): + """ + Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text` + and `kwargs` arguments to CLIPTokenizerFast's [`~CLIPTokenizerFast.__call__`] if `text` is not `None` to encode + the text. To prepare the image(s), this method forwards the `videos` and `kwargs` arguments to + VideoMAEFeatureExtractor's [`~VideoMAEFeatureExtractor.__call__`] if `videos` is not `None`. Please refer to + the doctsring of the above two methods for more information. + + Args: + text (`str`, `List[str]`, `List[List[str]]`): + The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings + (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set + `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). + videos (`List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`, `List[List[PIL.Image.Image]]`, `List[List[np.ndarrray]]`,: + `List[List[torch.Tensor]]`): The video or batch of videos to be prepared. Each video should be a list + of frames, which can be either PIL images or NumPy arrays. In case of NumPy arrays/PyTorch tensors, + each frame should be of shape (H, W, C), where H and W are frame height and width, and C is a number of + channels. + + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors of a particular framework. Acceptable values are: + + - `'tf'`: Return TensorFlow `tf.constant` objects. + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return NumPy `np.ndarray` objects. + - `'jax'`: Return JAX `jnp.ndarray` objects. + + Returns: + [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: + + - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. + - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when + `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not + `None`). + - **pixel_values** -- Pixel values to be fed to a model. Returned when `videos` is not `None`. + """ + + if text is None and videos is None: + raise ValueError("You have to specify either text or videos. Both cannot be none.") + + if text is not None: + encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs) + + if videos is not None: + image_features = self.feature_extractor(videos, return_tensors=return_tensors, **kwargs) + + if text is not None and videos is not None: + encoding["pixel_values"] = image_features.pixel_values + return encoding + elif text is not None: + return encoding + else: + return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors) + + def batch_decode(self, *args, **kwargs): + """ + This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please + refer to the docstring of this method for more information. + """ + return self.tokenizer.batch_decode(*args, **kwargs) + + def decode(self, *args, **kwargs): + """ + This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to + the docstring of this method for more information. + """ + return self.tokenizer.decode(*args, **kwargs) diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index dbdf37da4c7161..c3017bc6a6e05f 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -5202,6 +5202,37 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class XCLIPModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XCLIPPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XCLIPTextModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class XCLIPVisionModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + XGLM_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/src/transformers/utils/dummy_vision_objects.py b/src/transformers/utils/dummy_vision_objects.py index fa30432070a37b..e1f4f3b1fd9fa1 100644 --- a/src/transformers/utils/dummy_vision_objects.py +++ b/src/transformers/utils/dummy_vision_objects.py @@ -24,13 +24,6 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) -class CLIPProcessor(metaclass=DummyObject): - _backends = ["vision"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["vision"]) - - class ConvNextFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] diff --git a/tests/models/x_clip/__init__.py b/tests/models/x_clip/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/tests/models/x_clip/test_modeling_x_clip.py b/tests/models/x_clip/test_modeling_x_clip.py new file mode 100644 index 00000000000000..62c8e9992b0d9c --- /dev/null +++ b/tests/models/x_clip/test_modeling_x_clip.py @@ -0,0 +1,672 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Testing suite for the PyTorch XCLIP model. """ + + +import inspect +import os +import tempfile +import unittest + +import numpy as np + +from huggingface_hub import hf_hub_download +from transformers import XCLIPConfig, XCLIPTextConfig, XCLIPVisionConfig +from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device +from transformers.utils import is_torch_available, is_vision_available + +from ...test_configuration_common import ConfigTester +from ...test_modeling_common import ( + ModelTesterMixin, + _config_zero_init, + floats_tensor, + ids_tensor, + random_attention_mask, +) + + +if is_torch_available(): + import torch + from torch import nn + + from transformers import XCLIPModel, XCLIPTextModel, XCLIPVisionModel + from transformers.models.x_clip.modeling_x_clip import XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST + + +if is_vision_available(): + from transformers import XCLIPProcessor + + +class XCLIPVisionModelTester: + def __init__( + self, + parent, + batch_size=8, + image_size=30, + patch_size=2, + num_channels=3, + num_frames=8, # important; the batch size * time must be divisible by the number of frames + is_training=True, + hidden_size=32, + num_hidden_layers=5, + num_attention_heads=4, + intermediate_size=37, + mit_hidden_size=64, + dropout=0.1, + attention_dropout=0.1, + initializer_range=0.02, + scope=None, + ): + self.parent = parent + self.batch_size = batch_size + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.num_frames = num_frames + self.is_training = is_training + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.mit_hidden_size = mit_hidden_size + self.dropout = dropout + self.attention_dropout = attention_dropout + self.initializer_range = initializer_range + self.scope = scope + + # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) + num_patches = (image_size // patch_size) ** 2 + self.seq_length = num_patches + 1 + + def prepare_config_and_inputs(self): + pixel_values = floats_tensor( + [self.batch_size * self.num_frames, self.num_channels, self.image_size, self.image_size] + ) + config = self.get_config() + + return config, pixel_values + + def get_config(self): + return XCLIPVisionConfig( + image_size=self.image_size, + patch_size=self.patch_size, + num_channels=self.num_channels, + num_frames=self.num_frames, + hidden_size=self.hidden_size, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + intermediate_size=self.intermediate_size, + mit_hidden_size=self.mit_hidden_size, + dropout=self.dropout, + attention_dropout=self.attention_dropout, + initializer_range=self.initializer_range, + ) + + def create_and_check_model(self, config, pixel_values): + model = XCLIPVisionModel(config=config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + result = model(pixel_values) + # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + image_size = (self.image_size, self.image_size) + patch_size = (self.patch_size, self.patch_size) + num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) + self.parent.assertEqual( + result.last_hidden_state.shape, (self.batch_size * self.num_frames, num_patches + 1, self.hidden_size) + ) + self.parent.assertEqual(result.pooler_output.shape, (self.batch_size * self.num_frames, self.hidden_size)) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + config, pixel_values = config_and_inputs + inputs_dict = {"pixel_values": pixel_values} + return config, inputs_dict + + +@require_torch +class XCLIPVisionModelTest(ModelTesterMixin, unittest.TestCase): + """ + Here we also overwrite some of the tests of test_modeling_common.py, as X-CLIP does not use input_ids, inputs_embeds, + attention_mask and seq_length. + """ + + all_model_classes = (XCLIPVisionModel,) if is_torch_available() else () + fx_compatible = False + test_pruning = False + test_resize_embeddings = False + test_head_masking = False + + def setUp(self): + self.model_tester = XCLIPVisionModelTester(self) + self.config_tester = ConfigTester( + self, config_class=XCLIPVisionConfig, has_text_modality=False, hidden_size=37 + ) + + def test_config(self): + self.config_tester.run_common_tests() + + @unittest.skip(reason="X-CLIP does not use inputs_embeds") + def test_inputs_embeds(self): + pass + + def test_model_common_attributes(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) + x = model.get_output_embeddings() + self.assertTrue(x is None or isinstance(x, nn.Linear)) + + def test_forward_signature(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + signature = inspect.signature(model.forward) + # signature.parameters is an OrderedDict => so arg_names order is deterministic + arg_names = [*signature.parameters.keys()] + + expected_arg_names = ["pixel_values"] + self.assertListEqual(arg_names[:1], expected_arg_names) + + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + def test_training(self): + pass + + def test_training_gradient_checkpointing(self): + pass + + @unittest.skip(reason="XCLIPVisionModel has no base class and is not available in MODEL_MAPPING") + def test_save_load_fast_init_from_base(self): + pass + + @unittest.skip(reason="XCLIPVisionModel has no base class and is not available in MODEL_MAPPING") + def test_save_load_fast_init_to_base(self): + pass + + @slow + def test_model_from_pretrained(self): + for model_name in XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: + model = XCLIPVisionModel.from_pretrained(model_name) + self.assertIsNotNone(model) + + def test_gradient_checkpointing_backward_compatibility(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + if not model_class.supports_gradient_checkpointing: + continue + + print("Model class:", model_class) + + config.gradient_checkpointing = True + model = model_class(config) + self.assertTrue(model.is_gradient_checkpointing) + + def test_attention_outputs(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + config.return_dict = True + + # we add 1 here due to the special message token in X-CLIP's vision encoder + seq_len = getattr(self.model_tester, "seq_length", None) + 1 + encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) + + for model_class in self.all_model_classes: + inputs_dict["output_attentions"] = True + inputs_dict["output_hidden_states"] = False + config.return_dict = True + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + self.assertEqual(len(outputs.attentions), self.model_tester.num_hidden_layers) + + # check that output_attentions also work using config + del inputs_dict["output_attentions"] + config.output_attentions = True + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + self.assertEqual(len(outputs.attentions), self.model_tester.num_hidden_layers) + + self.assertListEqual( + list(outputs.attentions[0].shape[-3:]), + [self.model_tester.num_attention_heads, encoder_seq_length, encoder_seq_length], + ) + out_len = len(outputs) + + # Check attention is always last and order is fine + inputs_dict["output_attentions"] = True + inputs_dict["output_hidden_states"] = True + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + self.assertEqual(out_len + 1, len(outputs)) + + self_attentions = outputs.attentions + + self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) + self.assertListEqual( + list(self_attentions[0].shape[-3:]), + [self.model_tester.num_attention_heads, encoder_seq_length, encoder_seq_length], + ) + + @require_torch_multi_gpu + def test_multi_gpu_data_parallel_forward(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + # some params shouldn't be scattered by nn.DataParallel + # so just remove them if they are present. + blacklist_non_batched_params = ["head_mask", "decoder_head_mask", "cross_attn_head_mask"] + for k in blacklist_non_batched_params: + inputs_dict.pop(k, None) + + # move input tensors to cuda:O + for k, v in inputs_dict.items(): + if torch.is_tensor(v): + inputs_dict[k] = v.to(0) + + for model_class in self.all_model_classes: + model = model_class(config=config) + model.to(0) + model.eval() + + # Wrap model in nn.DataParallel + model = nn.DataParallel(model) + with torch.no_grad(): + test = self._prepare_for_class(inputs_dict, model_class) + for k, v in test.items(): + if isinstance(v, torch.Tensor): + print(k, v.shape) + else: + print(k, v) + _ = model(**self._prepare_for_class(inputs_dict, model_class)) + + +class XCLIPTextModelTester: + def __init__( + self, + parent, + batch_size=8, + seq_length=7, + is_training=True, + use_input_mask=True, + use_labels=True, + vocab_size=99, + hidden_size=32, + num_hidden_layers=5, + num_attention_heads=4, + intermediate_size=37, + dropout=0.1, + attention_dropout=0.1, + max_position_embeddings=512, + initializer_range=0.02, + scope=None, + ): + self.parent = parent + self.batch_size = batch_size + self.seq_length = seq_length + self.is_training = is_training + self.use_input_mask = use_input_mask + self.use_labels = use_labels + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.dropout = dropout + self.attention_dropout = attention_dropout + self.max_position_embeddings = max_position_embeddings + self.initializer_range = initializer_range + self.scope = scope + + def prepare_config_and_inputs(self): + input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) + + input_mask = None + if self.use_input_mask: + input_mask = random_attention_mask([self.batch_size, self.seq_length]) + + if input_mask is not None: + batch_size, seq_length = input_mask.shape + rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) + for batch_idx, start_index in enumerate(rnd_start_indices): + input_mask[batch_idx, :start_index] = 1 + input_mask[batch_idx, start_index:] = 0 + + config = self.get_config() + + return config, input_ids, input_mask + + def get_config(self): + return XCLIPTextConfig( + vocab_size=self.vocab_size, + hidden_size=self.hidden_size, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + intermediate_size=self.intermediate_size, + dropout=self.dropout, + attention_dropout=self.attention_dropout, + max_position_embeddings=self.max_position_embeddings, + initializer_range=self.initializer_range, + ) + + def create_and_check_model(self, config, input_ids, input_mask): + model = XCLIPTextModel(config=config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + result = model(input_ids, attention_mask=input_mask) + result = model(input_ids) + self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) + self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + config, input_ids, input_mask = config_and_inputs + inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} + return config, inputs_dict + + +@require_torch +class XCLIPTextModelTest(ModelTesterMixin, unittest.TestCase): + + all_model_classes = (XCLIPTextModel,) if is_torch_available() else () + fx_compatible = False + test_pruning = False + test_head_masking = False + + def setUp(self): + self.model_tester = XCLIPTextModelTester(self) + self.config_tester = ConfigTester(self, config_class=XCLIPTextConfig, hidden_size=37) + + def test_config(self): + self.config_tester.run_common_tests() + + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + def test_training(self): + pass + + def test_training_gradient_checkpointing(self): + pass + + @unittest.skip(reason="X-CLIP does not use inputs_embeds") + def test_inputs_embeds(self): + pass + + @unittest.skip(reason="XCLIPTextModel has no base class and is not available in MODEL_MAPPING") + def test_save_load_fast_init_from_base(self): + pass + + @unittest.skip(reason="XCLIPTextModel has no base class and is not available in MODEL_MAPPING") + def test_save_load_fast_init_to_base(self): + pass + + @slow + def test_model_from_pretrained(self): + for model_name in XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: + model = XCLIPTextModel.from_pretrained(model_name) + self.assertIsNotNone(model) + + +class XCLIPModelTester: + def __init__(self, parent, projection_dim=64, mit_hidden_size=64, is_training=True): + self.parent = parent + self.projection_dim = projection_dim + self.mit_hidden_size = mit_hidden_size + self.text_model_tester = XCLIPTextModelTester(parent) + self.vision_model_tester = XCLIPVisionModelTester(parent) + self.is_training = is_training + + def prepare_config_and_inputs(self): + text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() + vision_config, _ = self.vision_model_tester.prepare_config_and_inputs() + pixel_values = floats_tensor( + [ + self.vision_model_tester.batch_size, + self.vision_model_tester.num_frames, + self.vision_model_tester.num_channels, + self.vision_model_tester.image_size, + self.vision_model_tester.image_size, + ] + ) + + config = self.get_config() + + return config, input_ids, attention_mask, pixel_values + + def get_config(self): + return XCLIPConfig.from_text_vision_configs( + self.text_model_tester.get_config(), + self.vision_model_tester.get_config(), + projection_dim=self.projection_dim, + ) + + def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): + model = XCLIPModel(config).to(torch_device).eval() + with torch.no_grad(): + result = model(input_ids, pixel_values, attention_mask) + self.parent.assertEqual( + result.logits_per_video.shape, + (self.vision_model_tester.batch_size, self.text_model_tester.batch_size), + ) + self.parent.assertEqual( + result.logits_per_text.shape, + (self.text_model_tester.batch_size, self.vision_model_tester.batch_size), + ) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + config, input_ids, attention_mask, pixel_values = config_and_inputs + inputs_dict = { + "input_ids": input_ids, + "attention_mask": attention_mask, + "pixel_values": pixel_values, + "return_loss": True, + } + return config, inputs_dict + + +@require_torch +class XCLIPModelTest(ModelTesterMixin, unittest.TestCase): + all_model_classes = (XCLIPModel,) if is_torch_available() else () + fx_compatible = False + test_head_masking = False + test_pruning = False + test_resize_embeddings = False + test_attention_outputs = False + test_torchscript = False + maxdiff = None + + def setUp(self): + self.model_tester = XCLIPModelTester(self) + + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + @unittest.skip(reason="Hidden_states is tested in individual model tests") + def test_hidden_states_output(self): + pass + + @unittest.skip(reason="Inputs_embeds is tested in individual model tests") + def test_inputs_embeds(self): + pass + + @unittest.skip(reason="Retain_grad is tested in individual model tests") + def test_retain_grad_hidden_states_attentions(self): + pass + + @unittest.skip(reason="XCLIPModel does not have input/output embeddings") + def test_model_common_attributes(self): + pass + + @unittest.skip(reason="XCLIPModel does not support feedforward chunking") + def test_feed_forward_chunking(self): + pass + + # override as the `logit_scale`, `prompts_generator.alpha` parameters require special treatment + def test_initialization(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + configs_no_init = _config_zero_init(config) + for model_class in self.all_model_classes: + model = model_class(config=configs_no_init) + for name, param in model.named_parameters(): + if param.requires_grad: + # check if `logit_scale` is initilized as per the original implementation + if name == "logit_scale": + self.assertAlmostEqual( + param.data.item(), + np.log(1 / 0.07), + delta=1e-3, + msg=f"Parameter {name} of model {model_class} seems not properly initialized", + ) + elif name == "prompts_generator.alpha": + self.assertAlmostEqual(param.data.mean().item(), model.config.prompt_alpha) + else: + self.assertIn( + ((param.data.mean() * 1e9).round() / 1e9).item(), + [0.0, 1.0], + msg=f"Parameter {name} of model {model_class} seems not properly initialized", + ) + + def _create_and_check_torchscript(self, config, inputs_dict): + if not self.test_torchscript: + return + + configs_no_init = _config_zero_init(config) # To be sure we have no Nan + configs_no_init.torchscript = True + configs_no_init.return_dict = False + for model_class in self.all_model_classes: + model = model_class(config=configs_no_init) + model.to(torch_device) + model.eval() + + try: + input_ids = inputs_dict["input_ids"] + pixel_values = inputs_dict["pixel_values"] # X-CLIP needs pixel_values + traced_model = torch.jit.trace(model, (input_ids, pixel_values)) + except RuntimeError: + self.fail("Couldn't trace module.") + + with tempfile.TemporaryDirectory() as tmp_dir_name: + pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") + + try: + torch.jit.save(traced_model, pt_file_name) + except Exception: + self.fail("Couldn't save module.") + + try: + loaded_model = torch.jit.load(pt_file_name) + except Exception: + self.fail("Couldn't load module.") + + model.to(torch_device) + model.eval() + + loaded_model.to(torch_device) + loaded_model.eval() + + model_state_dict = model.state_dict() + loaded_model_state_dict = loaded_model.state_dict() + + self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) + + models_equal = True + for layer_name, p1 in model_state_dict.items(): + p2 = loaded_model_state_dict[layer_name] + if p1.data.ne(p2.data).sum() > 0: + models_equal = False + + self.assertTrue(models_equal) + + def test_load_vision_text_config(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + # Save XCLIPConfig and check if we can load XCLIPVisionConfig from it + with tempfile.TemporaryDirectory() as tmp_dir_name: + config.save_pretrained(tmp_dir_name) + vision_config = XCLIPVisionConfig.from_pretrained(tmp_dir_name) + self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) + + # Save XCLIPConfig and check if we can load XCLIPTextConfig from it + with tempfile.TemporaryDirectory() as tmp_dir_name: + config.save_pretrained(tmp_dir_name) + text_config = XCLIPTextConfig.from_pretrained(tmp_dir_name) + self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) + + @slow + def test_model_from_pretrained(self): + for model_name in XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: + model = XCLIPModel.from_pretrained(model_name) + self.assertIsNotNone(model) + + +# We will verify our results on a spaghetti video +def prepare_video(): + file = hf_hub_download( + repo_id="datasets/hf-internal-testing/spaghetti-video", filename="eating_spaghetti_8_frames.npy" + ) + video = np.load(file) + return list(video) + + +@require_vision +@require_torch +class XCLIPModelIntegrationTest(unittest.TestCase): + @slow + def test_inference(self): + model_name = "microsoft/xclip-base-patch32" + model = XCLIPModel.from_pretrained(model_name).to(torch_device) + processor = XCLIPProcessor.from_pretrained(model_name) + + video = prepare_video() + inputs = processor( + text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True + ).to(torch_device) + + # forward pass + with torch.no_grad(): + outputs = model(**inputs) + + # verify the logits + self.assertEqual( + outputs.logits_per_video.shape, + torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), + ) + self.assertEqual( + outputs.logits_per_text.shape, + torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), + ) + + expected_logits = torch.tensor([[14.3819, 20.6031, 15.0526]], device=torch_device) + + self.assertTrue(torch.allclose(outputs.logits_per_video, expected_logits, atol=1e-3)) diff --git a/utils/check_config_docstrings.py b/utils/check_config_docstrings.py index bcbbace39e0e71..b0e5d1ced51686 100644 --- a/utils/check_config_docstrings.py +++ b/utils/check_config_docstrings.py @@ -49,6 +49,7 @@ "SpeechEncoderDecoderConfig", "VisionEncoderDecoderConfig", "VisionTextDualEncoderConfig", + "XCLIPConfig", } diff --git a/utils/check_repo.py b/utils/check_repo.py index a9beda0c372709..207592c91c961a 100644 --- a/utils/check_repo.py +++ b/utils/check_repo.py @@ -207,6 +207,8 @@ "TFWav2Vec2ForCTC", "TFHubertForCTC", "MaskFormerForInstanceSegmentation", + "XCLIPVisionModel", + "XCLIPTextModel", ] # Update this list for models that have multiple model types for the same From 895c528886dfee84d51342e2063c8bde29da208f Mon Sep 17 00:00:00 2001 From: Nima Boscarino Date: Fri, 9 Sep 2022 00:15:24 -0700 Subject: [PATCH 245/539] Update translation requests contact (#18941) * Update TRANSLATING.md Update the contact to @GuggerSylvain * Update docs/TRANSLATING.md Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> --- docs/TRANSLATING.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/TRANSLATING.md b/docs/TRANSLATING.md index cc40dd725ec05a..c6f5c45baf0291 100644 --- a/docs/TRANSLATING.md +++ b/docs/TRANSLATING.md @@ -54,5 +54,4 @@ The fields you should add are `local` (with the name of the file containing the Once you have translated the `_toctree.yml` file, you can start translating the [MDX](https://mdxjs.com/) files associated with your docs chapter. -> 🙋 If you'd like others to help you with the translation, you can either [open an issue](https://github.com/huggingface/transformers/issues) or tag @[espejelomar](https://twitter.com/espejelomar) - on Twitter to gain some visibility. \ No newline at end of file +> 🙋 If you'd like others to help you with the translation, you should [open an issue](https://github.com/huggingface/transformers/issues) and tag @sgugger. From 22f72185601d5167a747104b4aca102d0e92524c Mon Sep 17 00:00:00 2001 From: HuYong Date: Fri, 9 Sep 2022 19:36:46 +0800 Subject: [PATCH 246/539] add task_type_id to BERT to support ERNIE-2.0 and ERNIE-3.0 models (#18686) * add_ernie * remove Tokenizer in ernie * polish code * format code style * polish code * fix style * update doc * make fix-copies * change model name * change model name * fix dependency * add more copied from * rename ErnieLMHeadModel to ErnieForCausalLM do not expose ErnieLayer update doc * fix * make style * polish code * polish code * fix * fix * fix * fix * fix * final fix Co-authored-by: ydshieh --- README.md | 1 + README_ko.md | 1 + README_zh-hans.md | 1 + README_zh-hant.md | 1 + docs/source/en/_toctree.yml | 2 + docs/source/en/index.mdx | 2 + docs/source/en/model_doc/ernie.mdx | 102 + docs/source/en/serialization.mdx | 1 + src/transformers/__init__.py | 33 + src/transformers/models/__init__.py | 1 + .../models/auto/configuration_auto.py | 3 + src/transformers/models/auto/modeling_auto.py | 10 + .../models/auto/tokenization_auto.py | 1 + src/transformers/models/ernie/__init__.py | 74 + .../models/ernie/configuration_ernie.py | 169 ++ .../models/ernie/modeling_ernie.py | 1830 +++++++++++++++++ src/transformers/utils/dummy_pt_objects.py | 73 + tests/models/ernie/__init__.py | 0 tests/models/ernie/test_modeling_ernie.py | 577 ++++++ 19 files changed, 2882 insertions(+) create mode 100644 docs/source/en/model_doc/ernie.mdx create mode 100644 src/transformers/models/ernie/__init__.py create mode 100644 src/transformers/models/ernie/configuration_ernie.py create mode 100644 src/transformers/models/ernie/modeling_ernie.py create mode 100644 tests/models/ernie/__init__.py create mode 100644 tests/models/ernie/test_modeling_ernie.py diff --git a/README.md b/README.md index e832a113e488d7..6edaf4f012fc71 100644 --- a/README.md +++ b/README.md @@ -295,6 +295,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun. 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. 1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. +1. **[ERNIE](https://huggingface.co/docs/transformers/main/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu. 1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab. 1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. 1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon. diff --git a/README_ko.md b/README_ko.md index 0566c31de2a41d..18c300975833c9 100644 --- a/README_ko.md +++ b/README_ko.md @@ -247,6 +247,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun. 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. 1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. +1. **[ERNIE](https://huggingface.co/docs/transformers/main/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu. 1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab. 1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. 1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon. diff --git a/README_zh-hans.md b/README_zh-hans.md index a3bd914c09a05e..77710265651f59 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -271,6 +271,7 @@ conda install -c huggingface transformers 1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (来自 Intel Labs) 伴随论文 [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) 由 René Ranftl, Alexey Bochkovskiy, Vladlen Koltun 发布。 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (来自 Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning 发布。 1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (来自 Google Research) 伴随论文 [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) 由 Sascha Rothe, Shashi Narayan, Aliaksei Severyn 发布。 +1. **[ERNIE](https://huggingface.co/docs/transformers/main/model_doc/ernie)** (来自 Baidu) 伴随论文 [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu 发布。 1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (来自 CNRS) 伴随论文 [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) 由 Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab 发布。 1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (来自 Facebook AI) 伴随论文 [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) 由 Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela 发布。 1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (来自 Google Research) 伴随论文 [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) 由 James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index f544dd5b53d922..196f4741848371 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -283,6 +283,7 @@ conda install -c huggingface transformers 1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun. 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. 1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. +1. **[ERNIE](https://huggingface.co/docs/transformers/main/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu. 1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab. 1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. 1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon. diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 6135a830181a4b..c4c5f2162aba3c 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -237,6 +237,8 @@ title: ELECTRA - local: model_doc/encoder-decoder title: Encoder Decoder Models + - local: model_doc/ernie + title: ERNIE - local: model_doc/flaubert title: FlauBERT - local: model_doc/fnet diff --git a/docs/source/en/index.mdx b/docs/source/en/index.mdx index 6a1722cf33ffa5..fb2ff2d418229b 100644 --- a/docs/source/en/index.mdx +++ b/docs/source/en/index.mdx @@ -87,6 +87,7 @@ The documentation is organized into five sections: 1. **[DPT](master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun. 1. **[ELECTRA](model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. 1. **[EncoderDecoder](model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. +1. **[ERNIE](model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu. 1. **[FlauBERT](model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab. 1. **[FLAVA](model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. 1. **[FNet](model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon. @@ -230,6 +231,7 @@ Flax), PyTorch, and/or TensorFlow. | DPT | ❌ | ❌ | ✅ | ❌ | ❌ | | ELECTRA | ✅ | ✅ | ✅ | ✅ | ✅ | | Encoder decoder | ❌ | ❌ | ✅ | ✅ | ✅ | +| ERNIE | ❌ | ❌ | ✅ | ❌ | ❌ | | FairSeq Machine-Translation | ✅ | ❌ | ✅ | ❌ | ❌ | | FlauBERT | ✅ | ❌ | ✅ | ✅ | ❌ | | FLAVA | ❌ | ❌ | ✅ | ❌ | ❌ | diff --git a/docs/source/en/model_doc/ernie.mdx b/docs/source/en/model_doc/ernie.mdx new file mode 100644 index 00000000000000..6ec3f104732008 --- /dev/null +++ b/docs/source/en/model_doc/ernie.mdx @@ -0,0 +1,102 @@ + + +# ERNIE + +## Overview +ERNIE is a series of powerful models proposed by baidu, especially in Chinese tasks, +including [ERNIE1.0](https://arxiv.org/abs/1904.09223), [ERNIE2.0](https://ojs.aaai.org/index.php/AAAI/article/view/6428), +[ERNIE3.0](https://arxiv.org/abs/2107.02137), [ERNIE-Gram](https://arxiv.org/abs/2010.12148), [ERNIE-health](https://arxiv.org/abs/2110.07244), etc. + +These models are contributed by [nghuyong](https://huggingface.co/nghuyong) and the official code can be found in [PaddleNLP](https://github.com/PaddlePaddle/PaddleNLP) (in PaddlePaddle). + +### How to use +Take `ernie-1.0-base-zh` as an example: + +```Python +from transformers import AutoTokenizer, AutoModel +tokenizer = AutoTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh") +model = AutoModel.from_pretrained("nghuyong/ernie-1.0-base-zh") +``` + +### Supported Models + +| Model Name | Language | Description | +|:-------------------:|:--------:|:-------------------------------:| +| ernie-1.0-base-zh | Chinese | Layer:12, Heads:12, Hidden:768 | +| ernie-2.0-base-en | English | Layer:12, Heads:12, Hidden:768 | +| ernie-2.0-large-en | English | Layer:24, Heads:16, Hidden:1024 | +| ernie-3.0-base-zh | Chinese | Layer:12, Heads:12, Hidden:768 | +| ernie-3.0-medium-zh | Chinese | Layer:6, Heads:12, Hidden:768 | +| ernie-3.0-mini-zh | Chinese | Layer:6, Heads:12, Hidden:384 | +| ernie-3.0-micro-zh | Chinese | Layer:4, Heads:12, Hidden:384 | +| ernie-3.0-nano-zh | Chinese | Layer:4, Heads:12, Hidden:312 | +| ernie-health-zh | Chinese | Layer:12, Heads:12, Hidden:768 | +| ernie-gram-zh | Chinese | Layer:12, Heads:12, Hidden:768 | + +You can find all the supported models from huggingface's model hub: [huggingface.co/nghuyong](https://huggingface.co/nghuyong), and model details from paddle's official +repo: [PaddleNLP](https://paddlenlp.readthedocs.io/zh/latest/model_zoo/transformers/ERNIE/contents.html) +and [ERNIE](https://github.com/PaddlePaddle/ERNIE/blob/repro). + +## ErnieConfig + +[[autodoc]] ErnieConfig + - all + +## Ernie specific outputs + +[[autodoc]] models.ernie.modeling_ernie.ErnieForPreTrainingOutput + +## ErnieModel + +[[autodoc]] ErnieModel + - forward + +## ErnieForPreTraining + +[[autodoc]] ErnieForPreTraining + - forward + +## ErnieForCausalLM + +[[autodoc]] ErnieForCausalLM + - forward + +## ErnieForMaskedLM + +[[autodoc]] ErnieForMaskedLM + - forward + +## ErnieForNextSentencePrediction + +[[autodoc]] ErnieForNextSentencePrediction + - forward + +## ErnieForSequenceClassification + +[[autodoc]] ErnieForSequenceClassification + - forward + +## ErnieForMultipleChoice + +[[autodoc]] ErnieForMultipleChoice + - forward + +## ErnieForTokenClassification + +[[autodoc]] ErnieForTokenClassification + - forward + +## ErnieForQuestionAnswering + +[[autodoc]] ErnieForQuestionAnswering + - forward \ No newline at end of file diff --git a/docs/source/en/serialization.mdx b/docs/source/en/serialization.mdx index 31ad430e06434b..74f50c78513ce6 100644 --- a/docs/source/en/serialization.mdx +++ b/docs/source/en/serialization.mdx @@ -67,6 +67,7 @@ Ready-made configurations include the following architectures: - DETR - DistilBERT - ELECTRA +- ERNIE - FlauBERT - GPT Neo - GPT-J diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index c94abb1dac6a3e..3bea1e4ad40b25 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -203,6 +203,10 @@ "models.dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"], "models.electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraTokenizer"], "models.encoder_decoder": ["EncoderDecoderConfig"], + "models.ernie": [ + "ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", + "ErnieConfig", + ], "models.flaubert": ["FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "FlaubertConfig", "FlaubertTokenizer"], "models.flava": [ "FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP", @@ -1168,6 +1172,21 @@ ] ) _import_structure["models.encoder_decoder"].append("EncoderDecoderModel") + _import_structure["models.ernie"].extend( + [ + "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST", + "ErnieForCausalLM", + "ErnieForMaskedLM", + "ErnieForMultipleChoice", + "ErnieForNextSentencePrediction", + "ErnieForPreTraining", + "ErnieForQuestionAnswering", + "ErnieForSequenceClassification", + "ErnieForTokenClassification", + "ErnieModel", + "ErniePreTrainedModel", + ] + ) _import_structure["models.flaubert"].extend( [ "FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST", @@ -3066,6 +3085,7 @@ from .models.dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig from .models.electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraTokenizer from .models.encoder_decoder import EncoderDecoderConfig + from .models.ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig from .models.flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig, FlaubertTokenizer from .models.flava import ( FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP, @@ -3879,6 +3899,19 @@ load_tf_weights_in_electra, ) from .models.encoder_decoder import EncoderDecoderModel + from .models.ernie import ( + ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, + ErnieForCausalLM, + ErnieForMaskedLM, + ErnieForMultipleChoice, + ErnieForNextSentencePrediction, + ErnieForPreTraining, + ErnieForQuestionAnswering, + ErnieForSequenceClassification, + ErnieForTokenClassification, + ErnieModel, + ErniePreTrainedModel, + ) from .models.flaubert import ( FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertForMultipleChoice, diff --git a/src/transformers/models/__init__.py b/src/transformers/models/__init__.py index 9cc42c8d1240f0..0e2bad475a142e 100644 --- a/src/transformers/models/__init__.py +++ b/src/transformers/models/__init__.py @@ -57,6 +57,7 @@ dpt, electra, encoder_decoder, + ernie, flaubert, flava, fnet, diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index fe50973ac71696..785518fbcd0817 100644 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -61,6 +61,7 @@ ("dpt", "DPTConfig"), ("electra", "ElectraConfig"), ("encoder-decoder", "EncoderDecoderConfig"), + ("ernie", "ErnieConfig"), ("flaubert", "FlaubertConfig"), ("flava", "FlavaConfig"), ("fnet", "FNetConfig"), @@ -188,6 +189,7 @@ ("dpr", "DPR_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("dpt", "DPT_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("electra", "ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("ernie", "ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("flaubert", "FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("flava", "FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("fnet", "FNET_PRETRAINED_CONFIG_ARCHIVE_MAP"), @@ -316,6 +318,7 @@ ("dpt", "DPT"), ("electra", "ELECTRA"), ("encoder-decoder", "Encoder decoder"), + ("ernie", "ERNIE"), ("flaubert", "FlauBERT"), ("flava", "FLAVA"), ("fnet", "FNet"), diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index 79b58d49383d0c..8efde09b96ea64 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -60,6 +60,7 @@ ("dpr", "DPRQuestionEncoder"), ("dpt", "DPTModel"), ("electra", "ElectraModel"), + ("ernie", "ErnieModel"), ("flaubert", "FlaubertModel"), ("flava", "FlavaModel"), ("fnet", "FNetModel"), @@ -165,6 +166,7 @@ ("deberta-v2", "DebertaV2ForMaskedLM"), ("distilbert", "DistilBertForMaskedLM"), ("electra", "ElectraForPreTraining"), + ("ernie", "ErnieForPreTraining"), ("flaubert", "FlaubertWithLMHeadModel"), ("flava", "FlavaForPreTraining"), ("fnet", "FNetForPreTraining"), @@ -223,6 +225,7 @@ ("distilbert", "DistilBertForMaskedLM"), ("electra", "ElectraForMaskedLM"), ("encoder-decoder", "EncoderDecoderModel"), + ("ernie", "ErnieForMaskedLM"), ("flaubert", "FlaubertWithLMHeadModel"), ("fnet", "FNetForMaskedLM"), ("fsmt", "FSMTForConditionalGeneration"), @@ -284,6 +287,7 @@ ("ctrl", "CTRLLMHeadModel"), ("data2vec-text", "Data2VecTextForCausalLM"), ("electra", "ElectraForCausalLM"), + ("ernie", "ErnieForCausalLM"), ("gpt2", "GPT2LMHeadModel"), ("gpt_neo", "GPTNeoForCausalLM"), ("gpt_neox", "GPTNeoXForCausalLM"), @@ -413,6 +417,7 @@ ("deberta-v2", "DebertaV2ForMaskedLM"), ("distilbert", "DistilBertForMaskedLM"), ("electra", "ElectraForMaskedLM"), + ("ernie", "ErnieForMaskedLM"), ("flaubert", "FlaubertWithLMHeadModel"), ("fnet", "FNetForMaskedLM"), ("funnel", "FunnelForMaskedLM"), @@ -502,6 +507,7 @@ ("deberta-v2", "DebertaV2ForSequenceClassification"), ("distilbert", "DistilBertForSequenceClassification"), ("electra", "ElectraForSequenceClassification"), + ("ernie", "ErnieForSequenceClassification"), ("flaubert", "FlaubertForSequenceClassification"), ("fnet", "FNetForSequenceClassification"), ("funnel", "FunnelForSequenceClassification"), @@ -558,6 +564,7 @@ ("deberta-v2", "DebertaV2ForQuestionAnswering"), ("distilbert", "DistilBertForQuestionAnswering"), ("electra", "ElectraForQuestionAnswering"), + ("ernie", "ErnieForQuestionAnswering"), ("flaubert", "FlaubertForQuestionAnsweringSimple"), ("fnet", "FNetForQuestionAnswering"), ("funnel", "FunnelForQuestionAnswering"), @@ -627,6 +634,7 @@ ("deberta-v2", "DebertaV2ForTokenClassification"), ("distilbert", "DistilBertForTokenClassification"), ("electra", "ElectraForTokenClassification"), + ("ernie", "ErnieForTokenClassification"), ("flaubert", "FlaubertForTokenClassification"), ("fnet", "FNetForTokenClassification"), ("funnel", "FunnelForTokenClassification"), @@ -668,6 +676,7 @@ ("deberta-v2", "DebertaV2ForMultipleChoice"), ("distilbert", "DistilBertForMultipleChoice"), ("electra", "ElectraForMultipleChoice"), + ("ernie", "ErnieForMultipleChoice"), ("flaubert", "FlaubertForMultipleChoice"), ("fnet", "FNetForMultipleChoice"), ("funnel", "FunnelForMultipleChoice"), @@ -695,6 +704,7 @@ MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict( [ ("bert", "BertForNextSentencePrediction"), + ("ernie", "ErnieForNextSentencePrediction"), ("fnet", "FNetForNextSentencePrediction"), ("megatron-bert", "MegatronBertForNextSentencePrediction"), ("mobilebert", "MobileBertForNextSentencePrediction"), diff --git a/src/transformers/models/auto/tokenization_auto.py b/src/transformers/models/auto/tokenization_auto.py index 9eb802b1fb1d86..86cc4eba94f3da 100644 --- a/src/transformers/models/auto/tokenization_auto.py +++ b/src/transformers/models/auto/tokenization_auto.py @@ -121,6 +121,7 @@ ), ), ("electra", ("ElectraTokenizer", "ElectraTokenizerFast" if is_tokenizers_available() else None)), + ("ernie", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)), ("flaubert", ("FlaubertTokenizer", None)), ("fnet", ("FNetTokenizer", "FNetTokenizerFast" if is_tokenizers_available() else None)), ("fsmt", ("FSMTTokenizer", None)), diff --git a/src/transformers/models/ernie/__init__.py b/src/transformers/models/ernie/__init__.py new file mode 100644 index 00000000000000..b8dce9a15b594f --- /dev/null +++ b/src/transformers/models/ernie/__init__.py @@ -0,0 +1,74 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available + + +_import_structure = { + "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_ernie"] = [ + "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST", + "ErnieForCausalLM", + "ErnieForMaskedLM", + "ErnieForMultipleChoice", + "ErnieForNextSentencePrediction", + "ErnieForPreTraining", + "ErnieForQuestionAnswering", + "ErnieForSequenceClassification", + "ErnieForTokenClassification", + "ErnieModel", + "ErniePreTrainedModel", + ] + +if TYPE_CHECKING: + from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_ernie import ( + ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, + ErnieForCausalLM, + ErnieForMaskedLM, + ErnieForMultipleChoice, + ErnieForNextSentencePrediction, + ErnieForPreTraining, + ErnieForQuestionAnswering, + ErnieForSequenceClassification, + ErnieForTokenClassification, + ErnieModel, + ErniePreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/src/transformers/models/ernie/configuration_ernie.py b/src/transformers/models/ernie/configuration_ernie.py new file mode 100644 index 00000000000000..26a6ab535f58df --- /dev/null +++ b/src/transformers/models/ernie/configuration_ernie.py @@ -0,0 +1,169 @@ +# coding=utf-8 +# Copyright 2022 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" ERNIE model configuration""" +from collections import OrderedDict +from typing import Mapping + +from ...configuration_utils import PretrainedConfig +from ...onnx import OnnxConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + +ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "nghuyong/ernie-1.0-base-zh": "https://huggingface.co/nghuyong/ernie-1.0-base-zh/resolve/main/config.json", + "nghuyong/ernie-2.0-base-en": "https://huggingface.co/nghuyong/ernie-2.0-base-en/resolve/main/config.json", + "nghuyong/ernie-2.0-large-en": "https://huggingface.co/nghuyong/ernie-2.0-large-en/resolve/main/config.json", + "nghuyong/ernie-3.0-base-zh": "https://huggingface.co/nghuyong/ernie-3.0-base-zh/resolve/main/config.json", + "nghuyong/ernie-3.0-medium-zh": "https://huggingface.co/nghuyong/ernie-3.0-medium-zh/resolve/main/config.json", + "nghuyong/ernie-3.0-mini-zh": "https://huggingface.co/nghuyong/ernie-3.0-mini-zh/resolve/main/config.json", + "nghuyong/ernie-3.0-micro-zh": "https://huggingface.co/nghuyong/ernie-3.0-micro-zh/resolve/main/config.json", + "nghuyong/ernie-3.0-nano-zh": "https://huggingface.co/nghuyong/ernie-3.0-nano-zh/resolve/main/config.json", + "nghuyong/ernie-gram-zh": "https://huggingface.co/nghuyong/ernie-gram-zh/resolve/main/config.json", + "nghuyong/ernie-health-zh": "https://huggingface.co/nghuyong/ernie-health-zh/resolve/main/config.json", +} + + +class ErnieConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`ErnieModel`] or a [`TFErnieModel`]. It is used to + instantiate a ERNIE model according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the ERNIE + [nghuyong/ernie-3.0-base-zh](https://huggingface.co/nghuyong/ernie-3.0-base-zh) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 30522): + Vocabulary size of the ERNIE model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`ErnieModel`] or [`TFErnieModel`]. + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. + hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention probabilities. + max_position_embeddings (`int`, *optional*, defaults to 512): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + type_vocab_size (`int`, *optional*, defaults to 2): + The vocabulary size of the `token_type_ids` passed when calling [`ErnieModel`] or [`TFErnieModel`]. + task_type_vocab_size (`int`, *optional*, defaults to 3): + The vocabulary size of the `task_type_ids` for ERNIE2.0/ERNIE3.0 model + use_task_id (`bool`, *optional*, defaults to `False`): + Whether or not the model support `task_type_ids` + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the layer normalization layers. + position_embedding_type (`str`, *optional*, defaults to `"absolute"`): + Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For + positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to + [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). + For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models + with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + classifier_dropout (`float`, *optional*): + The dropout ratio for the classification head. + + Examples: + + ```python + >>> from transformers import ErnieModel, ErnieConfig + + >>> # Initializing a ERNIE nghuyong/ernie-3.0-base-zh style configuration + >>> configuration = ErnieConfig() + + >>> # Initializing a model from the nghuyong/ernie-3.0-base-zh style configuration + >>> model = ErnieModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + model_type = "ernie" + + def __init__( + self, + vocab_size=30522, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=2, + task_type_vocab_size=3, + use_task_id=False, + initializer_range=0.02, + layer_norm_eps=1e-12, + pad_token_id=0, + position_embedding_type="absolute", + use_cache=True, + classifier_dropout=None, + **kwargs + ): + super().__init__(pad_token_id=pad_token_id, **kwargs) + + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.task_type_vocab_size = task_type_vocab_size + self.use_task_id = use_task_id + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.position_embedding_type = position_embedding_type + self.use_cache = use_cache + self.classifier_dropout = classifier_dropout + + +class ErnieOnnxConfig(OnnxConfig): + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + if self.task == "multiple-choice": + dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"} + else: + dynamic_axis = {0: "batch", 1: "sequence"} + return OrderedDict( + [ + ("input_ids", dynamic_axis), + ("attention_mask", dynamic_axis), + ("token_type_ids", dynamic_axis), + ("task_type_ids", dynamic_axis), + ] + ) diff --git a/src/transformers/models/ernie/modeling_ernie.py b/src/transformers/models/ernie/modeling_ernie.py new file mode 100644 index 00000000000000..0af6be3a707f0a --- /dev/null +++ b/src/transformers/models/ernie/modeling_ernie.py @@ -0,0 +1,1830 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""PyTorch ERNIE model.""" + + +import math +import warnings +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN +from ...modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + CausalLMOutputWithCrossAttentions, + MaskedLMOutput, + MultipleChoiceModelOutput, + NextSentencePredictorOutput, + QuestionAnsweringModelOutput, + SequenceClassifierOutput, + TokenClassifierOutput, +) +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import ( + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + is_torch_greater_than_1_6, + prune_linear_layer, +) +from ...utils import ( + ModelOutput, + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_ernie import ErnieConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "nghuyong/ernie-1.0-base-zh" +_CONFIG_FOR_DOC = "ErnieConfig" +_TOKENIZER_FOR_DOC = "BertTokenizer" + + +ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "nghuyong/ernie-1.0-base-zh", + "nghuyong/ernie-2.0-base-en", + "nghuyong/ernie-2.0-large-en", + "nghuyong/ernie-3.0-base-zh", + "nghuyong/ernie-3.0-medium-zh", + "nghuyong/ernie-3.0-mini-zh", + "nghuyong/ernie-3.0-micro-zh", + "nghuyong/ernie-3.0-nano-zh", + "nghuyong/ernie-gram-zh", + "nghuyong/ernie-health-zh", + # See all ERNIE models at https://huggingface.co/models?filter=ernie +] + + +class ErnieEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) + self.use_task_id = config.use_task_id + if config.use_task_id: + self.task_type_embeddings = nn.Embedding(config.task_type_vocab_size, config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) + if is_torch_greater_than_1_6: + self.register_buffer( + "token_type_ids", + torch.zeros(self.position_ids.size(), dtype=torch.long), + persistent=False, + ) + + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + task_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + past_key_values_length: int = 0, + ) -> torch.Tensor: + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] + + # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs + # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves + # issue #5664 + if token_type_ids is None: + if hasattr(self, "token_type_ids"): + buffered_token_type_ids = self.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = inputs_embeds + token_type_embeddings + if self.position_embedding_type == "absolute": + position_embeddings = self.position_embeddings(position_ids) + embeddings += position_embeddings + + # add `task_type_id` for ERNIE model + if self.use_task_id: + if task_type_ids is None: + task_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) + task_type_embeddings = self.task_type_embeddings(task_type_ids) + embeddings += task_type_embeddings + + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Ernie +class ErnieSelfAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads})" + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = position_embedding_type or getattr( + config, "position_embedding_type", "absolute" + ) + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) + + self.is_decoder = config.is_decoder + + def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. + is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_layer = past_key_value[0] + value_layer = past_key_value[1] + attention_mask = encoder_attention_mask + elif is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + seq_length = hidden_states.size()[1] + position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in ErnieModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + if self.is_decoder: + outputs = outputs + (past_key_value,) + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Ernie +class ErnieSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Ernie +class ErnieAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + self.self = ErnieSelfAttention(config, position_embedding_type=position_embedding_type) + self.output = ErnieSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Ernie +class ErnieIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Ernie +class ErnieOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Ernie +class ErnieLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = ErnieAttention(config) + self.is_decoder = config.is_decoder + self.add_cross_attention = config.add_cross_attention + if self.add_cross_attention: + if not self.is_decoder: + raise ValueError(f"{self} should be used as a decoder model if cross attention is added") + self.crossattention = ErnieAttention(config, position_embedding_type="absolute") + self.intermediate = ErnieIntermediate(config) + self.output = ErnieOutput(config) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + + # if decoder, the last output is tuple of self-attn cache + if self.is_decoder: + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + else: + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + cross_attn_present_key_value = None + if self.is_decoder and encoder_hidden_states is not None: + if not hasattr(self, "crossattention"): + raise ValueError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" + " by setting `config.add_cross_attention=True`" + ) + + # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + cross_attn_past_key_value, + output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights + + # add cross-attn cache to positions 3,4 of present_key_value tuple + cross_attn_present_key_value = cross_attention_outputs[-1] + present_key_value = present_key_value + cross_attn_present_key_value + + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output + ) + outputs = (layer_output,) + outputs + + # if decoder, return the attn key/values as the last output + if self.is_decoder: + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Ernie +class ErnieEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList([ErnieLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = False, + output_hidden_states: Optional[bool] = False, + return_dict: Optional[bool] = True, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + + next_decoder_cache = () if use_cache else None + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + if use_cache: + logger.warning( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, past_key_value, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + if self.config.add_cross_attention: + all_cross_attentions = all_cross_attentions + (layer_outputs[2],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->Ernie +class ErniePooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->Ernie +class ErniePredictionHeadTransform(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if isinstance(config.hidden_act, str): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Ernie +class ErnieLMPredictionHead(nn.Module): + def __init__(self, config): + super().__init__() + self.transform = ErniePredictionHeadTransform(config) + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. + self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + + # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` + self.decoder.bias = self.bias + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Ernie +class ErnieOnlyMLMHead(nn.Module): + def __init__(self, config): + super().__init__() + self.predictions = ErnieLMPredictionHead(config) + + def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: + prediction_scores = self.predictions(sequence_output) + return prediction_scores + + +# Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->Ernie +class ErnieOnlyNSPHead(nn.Module): + def __init__(self, config): + super().__init__() + self.seq_relationship = nn.Linear(config.hidden_size, 2) + + def forward(self, pooled_output): + seq_relationship_score = self.seq_relationship(pooled_output) + return seq_relationship_score + + +# Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->Ernie +class ErniePreTrainingHeads(nn.Module): + def __init__(self, config): + super().__init__() + self.predictions = ErnieLMPredictionHead(config) + self.seq_relationship = nn.Linear(config.hidden_size, 2) + + def forward(self, sequence_output, pooled_output): + prediction_scores = self.predictions(sequence_output) + seq_relationship_score = self.seq_relationship(pooled_output) + return prediction_scores, seq_relationship_score + + +class ErniePreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = ErnieConfig + base_model_prefix = "ernie" + supports_gradient_checkpointing = True + _keys_to_ignore_on_load_missing = [r"position_ids"] + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, nn.Linear): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, ErnieEncoder): + module.gradient_checkpointing = value + + +@dataclass +# Copied from transformers.models.bert.modeling_bert.BertForPreTrainingOutput with Bert->Ernie +class ErnieForPreTrainingOutput(ModelOutput): + """ + Output type of [`ErnieForPreTraining`]. + + Args: + loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): + Total loss as the sum of the masked language modeling loss and the next sequence prediction + (classification) loss. + prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`): + Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation + before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + loss: Optional[torch.FloatTensor] = None + prediction_logits: torch.FloatTensor = None + seq_relationship_logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +ERNIE_START_DOCSTRING = r""" + + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`ErnieConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +ERNIE_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + task_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Task type embedding is a special embedding to represent the characteristic of different tasks, such as + word-aware pre-training task, structure-aware pre-training task and semantic-aware pre-training task. We + assign a `task_type_id` to each task and the `task_type_id` is in the range `[0, + config.task_type_vocab_size-1] + position_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare Ernie Model transformer outputting raw hidden-states without any specific head on top.", + ERNIE_START_DOCSTRING, +) +class ErnieModel(ErniePreTrainedModel): + """ + + The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of + cross-attention is added between the self-attention layers, following the architecture described in [Attention is + all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, + Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. + + To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set + to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and + `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. + """ + + # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Ernie + def __init__(self, config, add_pooling_layer=True): + super().__init__(config) + self.config = config + + self.embeddings = ErnieEmbeddings(config) + self.encoder = ErnieEncoder(config) + + self.pooler = ErniePooler(config) if add_pooling_layer else None + + # Initialize weights and apply final processing + self.post_init() + + # Copied from transformers.models.bert.modeling_bert.BertModel.get_input_embeddings + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + # Copied from transformers.models.bert.modeling_bert.BertModel.set_input_embeddings + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + # Copied from transformers.models.bert.modeling_bert.BertModel._prune_heads + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithPoolingAndCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + task_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: + r""" + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if self.config.is_decoder: + use_cache = use_cache if use_cache is not None else self.config.use_cache + else: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + batch_size, seq_length = input_shape + device = input_ids.device if input_ids is not None else inputs_embeds.device + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if attention_mask is None: + attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) + + if token_type_ids is None: + if hasattr(self.embeddings, "token_type_ids"): + buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. + extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if self.config.is_decoder and encoder_hidden_states is not None: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + if encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + token_type_ids=token_type_ids, + task_type_ids=task_type_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + ) + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = encoder_outputs[0] + pooled_output = self.pooler(sequence_output) if self.pooler is not None else None + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + +@add_start_docstrings( + """ + Ernie Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next + sentence prediction (classification)` head. + """, + ERNIE_START_DOCSTRING, +) +class ErnieForPreTraining(ErniePreTrainedModel): + # Copied from transformers.models.bert.modeling_bert.BertForPreTraining.__init__ with Bert->Ernie,bert->ernie + def __init__(self, config): + super().__init__(config) + + self.ernie = ErnieModel(config) + self.cls = ErniePreTrainingHeads(config) + + # Initialize weights and apply final processing + self.post_init() + + # Copied from transformers.models.bert.modeling_bert.BertForPreTraining.get_output_embeddings + def get_output_embeddings(self): + return self.cls.predictions.decoder + + # Copied from transformers.models.bert.modeling_bert.BertForPreTraining.set_output_embeddings + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @replace_return_docstrings(output_type=ErnieForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + task_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + next_sentence_label: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], ErnieForPreTrainingOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., + config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), + the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the next sequence prediction (classification) loss. Input should be a sequence + pair (see `input_ids` docstring) Indices should be in `[0, 1]`: + + - 0 indicates sequence B is a continuation of sequence A, + - 1 indicates sequence B is a random sequence. + kwargs (`Dict[str, any]`, optional, defaults to *{}*): + Used to hide legacy arguments that have been deprecated. + + Returns: + + Example: + + ```python + >>> from transformers import BertTokenizer, ErnieForPreTraining + >>> import torch + + >>> tokenizer = BertTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh") + >>> model = ErnieForPreTraining.from_pretrained("nghuyong/ernie-1.0-base-zh") + + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + + >>> prediction_logits = outputs.prediction_logits + >>> seq_relationship_logits = outputs.seq_relationship_logits + ``` + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.ernie( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + task_type_ids=task_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output, pooled_output = outputs[:2] + prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) + + total_loss = None + if labels is not None and next_sentence_label is not None: + loss_fct = CrossEntropyLoss() + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) + total_loss = masked_lm_loss + next_sentence_loss + + if not return_dict: + output = (prediction_scores, seq_relationship_score) + outputs[2:] + return ((total_loss,) + output) if total_loss is not None else output + + return ErnieForPreTrainingOutput( + loss=total_loss, + prediction_logits=prediction_scores, + seq_relationship_logits=seq_relationship_score, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """Ernie Model with a `language modeling` head on top for CLM fine-tuning.""", ERNIE_START_DOCSTRING +) +class ErnieForCausalLM(ErniePreTrainedModel): + _keys_to_ignore_on_load_unexpected = [r"pooler"] + _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] + + # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.__init__ with BertLMHeadModel->ErnieForCausalLM,Bert->Ernie,bert->ernie + def __init__(self, config): + super().__init__(config) + + if not config.is_decoder: + logger.warning("If you want to use `ErnieForCausalLM` as a standalone, add `is_decoder=True.`") + + self.ernie = ErnieModel(config, add_pooling_layer=False) + self.cls = ErnieOnlyMLMHead(config) + + # Initialize weights and apply final processing + self.post_init() + + # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.get_output_embeddings + def get_output_embeddings(self): + return self.cls.predictions.decoder + + # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.set_output_embeddings + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=CausalLMOutputWithCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + task_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.Tensor]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: + r""" + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in + `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are + ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]` + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + if labels is not None: + use_cache = False + + outputs = self.ernie( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + task_type_ids=task_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + prediction_scores = self.cls(sequence_output) + + lm_loss = None + if labels is not None: + # we are doing next-token prediction; shift prediction scores and input ids by one + shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() + labels = labels[:, 1:].contiguous() + loss_fct = CrossEntropyLoss() + lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((lm_loss,) + output) if lm_loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=lm_loss, + logits=prediction_scores, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel.prepare_inputs_for_generation + def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs): + input_shape = input_ids.shape + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_shape) + + # cut decoder_input_ids if past is used + if past is not None: + input_ids = input_ids[:, -1:] + + return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past} + + # Copied from transformers.models.bert.modeling_bert.BertLMHeadModel._reorder_cache + def _reorder_cache(self, past, beam_idx): + reordered_past = () + for layer_past in past: + reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) + return reordered_past + + +@add_start_docstrings("""Ernie Model with a `language modeling` head on top.""", ERNIE_START_DOCSTRING) +class ErnieForMaskedLM(ErniePreTrainedModel): + _keys_to_ignore_on_load_unexpected = [r"pooler"] + _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] + + # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.__init__ with Bert->Ernie,bert->ernie + def __init__(self, config): + super().__init__(config) + + if config.is_decoder: + logger.warning( + "If you want to use `ErnieForMaskedLM` make sure `config.is_decoder=False` for " + "bi-directional self-attention." + ) + + self.ernie = ErnieModel(config, add_pooling_layer=False) + self.cls = ErnieOnlyMLMHead(config) + + # Initialize weights and apply final processing + self.post_init() + + # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.get_output_embeddings + def get_output_embeddings(self): + return self.cls.predictions.decoder + + # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.set_output_embeddings + def set_output_embeddings(self, new_embeddings): + self.cls.predictions.decoder = new_embeddings + + @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=MaskedLMOutput, + config_class=_CONFIG_FOR_DOC, + expected_output="'paris'", + expected_loss=0.88, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + task_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., + config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the + loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + """ + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.ernie( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + task_type_ids=task_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + prediction_scores = self.cls(sequence_output) + + masked_lm_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() # -100 index = padding token + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output + + return MaskedLMOutput( + loss=masked_lm_loss, + logits=prediction_scores, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + # Copied from transformers.models.bert.modeling_bert.BertForMaskedLM.prepare_inputs_for_generation + def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs): + input_shape = input_ids.shape + effective_batch_size = input_shape[0] + + # add a dummy token + if self.config.pad_token_id is None: + raise ValueError("The PAD token should be defined for generation") + + attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1) + dummy_token = torch.full( + (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device + ) + input_ids = torch.cat([input_ids, dummy_token], dim=1) + + return {"input_ids": input_ids, "attention_mask": attention_mask} + + +@add_start_docstrings( + """Ernie Model with a `next sentence prediction (classification)` head on top.""", + ERNIE_START_DOCSTRING, +) +class ErnieForNextSentencePrediction(ErniePreTrainedModel): + # Copied from transformers.models.bert.modeling_bert.BertForNextSentencePrediction.__init__ with Bert->Ernie,bert->ernie + def __init__(self, config): + super().__init__(config) + + self.ernie = ErnieModel(config) + self.cls = ErnieOnlyNSPHead(config) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + task_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + **kwargs, + ) -> Union[Tuple[torch.Tensor], NextSentencePredictorOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair + (see `input_ids` docstring). Indices should be in `[0, 1]`: + + - 0 indicates sequence B is a continuation of sequence A, + - 1 indicates sequence B is a random sequence. + + Returns: + + Example: + + ```python + >>> from transformers import BertTokenizer, ErnieForNextSentencePrediction + >>> import torch + + >>> tokenizer = BertTokenizer.from_pretrained("nghuyong/ernie-1.0-base-zh") + >>> model = ErnieForNextSentencePrediction.from_pretrained("nghuyong/ernie-1.0-base-zh") + + >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." + >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light." + >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt") + + >>> outputs = model(**encoding, labels=torch.LongTensor([1])) + >>> logits = outputs.logits + >>> assert logits[0, 0] < logits[0, 1] # next sentence was random + ``` + """ + + if "next_sentence_label" in kwargs: + warnings.warn( + "The `next_sentence_label` argument is deprecated and will be removed in a future version, use" + " `labels` instead.", + FutureWarning, + ) + labels = kwargs.pop("next_sentence_label") + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.ernie( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + task_type_ids=task_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs[1] + + seq_relationship_scores = self.cls(pooled_output) + + next_sentence_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1)) + + if not return_dict: + output = (seq_relationship_scores,) + outputs[2:] + return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output + + return NextSentencePredictorOutput( + loss=next_sentence_loss, + logits=seq_relationship_scores, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + Ernie Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled + output) e.g. for GLUE tasks. + """, + ERNIE_START_DOCSTRING, +) +class ErnieForSequenceClassification(ErniePreTrainedModel): + # Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification.__init__ with Bert->Ernie,bert->ernie + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.config = config + + self.ernie = ErnieModel(config) + classifier_dropout = ( + config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob + ) + self.dropout = nn.Dropout(classifier_dropout) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + task_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.ernie( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + task_type_ids=task_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + Ernie Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a + softmax) e.g. for RocStories/SWAG tasks. + """, + ERNIE_START_DOCSTRING, +) +class ErnieForMultipleChoice(ErniePreTrainedModel): + # Copied from transformers.models.bert.modeling_bert.BertForMultipleChoice.__init__ with Bert->Ernie,bert->ernie + def __init__(self, config): + super().__init__(config) + + self.ernie = ErnieModel(config) + classifier_dropout = ( + config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob + ) + self.dropout = nn.Dropout(classifier_dropout) + self.classifier = nn.Linear(config.hidden_size, 1) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=MultipleChoiceModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + task_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., + num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See + `input_ids` above) + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] + + input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None + attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None + token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None + position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None + inputs_embeds = ( + inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) + if inputs_embeds is not None + else None + ) + + outputs = self.ernie( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + task_type_ids=task_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + reshaped_logits = logits.view(-1, num_choices) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(reshaped_logits, labels) + + if not return_dict: + output = (reshaped_logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return MultipleChoiceModelOutput( + loss=loss, + logits=reshaped_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + Ernie Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for + Named-Entity-Recognition (NER) tasks. + """, + ERNIE_START_DOCSTRING, +) +class ErnieForTokenClassification(ErniePreTrainedModel): + _keys_to_ignore_on_load_unexpected = [r"pooler"] + + # Copied from transformers.models.bert.modeling_bert.BertForTokenClassification.__init__ with Bert->Ernie,bert->ernie + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.ernie = ErnieModel(config, add_pooling_layer=False) + classifier_dropout = ( + config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob + ) + self.dropout = nn.Dropout(classifier_dropout) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + task_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.ernie( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + task_type_ids=task_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + sequence_output = self.dropout(sequence_output) + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + Ernie Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear + layers on top of the hidden-states output to compute `span start logits` and `span end logits`). + """, + ERNIE_START_DOCSTRING, +) +class ErnieForQuestionAnswering(ErniePreTrainedModel): + _keys_to_ignore_on_load_unexpected = [r"pooler"] + + # Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering.__init__ with Bert->Ernie,bert->ernie + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.ernie = ErnieModel(config, add_pooling_layer=False) + self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(ERNIE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + task_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + start_positions: Optional[torch.Tensor] = None, + end_positions: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]: + r""" + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.ernie( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + task_type_ids=task_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1).contiguous() + end_logits = end_logits.squeeze(-1).contiguous() + + total_loss = None + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions = start_positions.clamp(0, ignored_index) + end_positions = end_positions.clamp(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if not return_dict: + output = (start_logits, end_logits) + outputs[2:] + return ((total_loss,) + output) if total_loss is not None else output + + return QuestionAnsweringModelOutput( + loss=total_loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index c3017bc6a6e05f..34a4cf335274fd 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -1875,6 +1875,79 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ErnieForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ErnieForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ErnieForMultipleChoice(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ErnieForNextSentencePrediction(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ErnieForPreTraining(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ErnieForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ErnieForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ErnieForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ErnieModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ErniePreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/tests/models/ernie/__init__.py b/tests/models/ernie/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/tests/models/ernie/test_modeling_ernie.py b/tests/models/ernie/test_modeling_ernie.py new file mode 100644 index 00000000000000..243550cea8d61e --- /dev/null +++ b/tests/models/ernie/test_modeling_ernie.py @@ -0,0 +1,577 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import tempfile +import unittest + +from transformers import ErnieConfig, is_torch_available +from transformers.models.auto import get_values +from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device + +from ...generation.test_generation_utils import GenerationTesterMixin +from ...test_configuration_common import ConfigTester +from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask + + +if is_torch_available(): + import torch + + from transformers import ( + MODEL_FOR_PRETRAINING_MAPPING, + ErnieForCausalLM, + ErnieForMaskedLM, + ErnieForMultipleChoice, + ErnieForNextSentencePrediction, + ErnieForPreTraining, + ErnieForQuestionAnswering, + ErnieForSequenceClassification, + ErnieForTokenClassification, + ErnieModel, + ) + from transformers.models.ernie.modeling_ernie import ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST + + +class ErnieModelTester: + def __init__( + self, + parent, + batch_size=13, + seq_length=7, + is_training=True, + use_input_mask=True, + use_token_type_ids=True, + use_labels=True, + vocab_size=99, + hidden_size=32, + num_hidden_layers=5, + num_attention_heads=4, + intermediate_size=37, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=16, + type_sequence_label_size=2, + initializer_range=0.02, + num_labels=3, + num_choices=4, + scope=None, + ): + self.parent = parent + self.batch_size = batch_size + self.seq_length = seq_length + self.is_training = is_training + self.use_input_mask = use_input_mask + self.use_token_type_ids = use_token_type_ids + self.use_labels = use_labels + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.type_sequence_label_size = type_sequence_label_size + self.initializer_range = initializer_range + self.num_labels = num_labels + self.num_choices = num_choices + self.scope = scope + + def prepare_config_and_inputs(self): + input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) + + input_mask = None + if self.use_input_mask: + input_mask = random_attention_mask([self.batch_size, self.seq_length]) + + token_type_ids = None + if self.use_token_type_ids: + token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) + + sequence_labels = None + token_labels = None + choice_labels = None + if self.use_labels: + sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) + token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) + choice_labels = ids_tensor([self.batch_size], self.num_choices) + + config = self.get_config() + + return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels + + def get_config(self): + """ + Returns a tiny configuration by default. + """ + return ErnieConfig( + vocab_size=self.vocab_size, + hidden_size=self.hidden_size, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + intermediate_size=self.intermediate_size, + hidden_act=self.hidden_act, + hidden_dropout_prob=self.hidden_dropout_prob, + attention_probs_dropout_prob=self.attention_probs_dropout_prob, + max_position_embeddings=self.max_position_embeddings, + type_vocab_size=self.type_vocab_size, + is_decoder=False, + initializer_range=self.initializer_range, + ) + + def prepare_config_and_inputs_for_decoder(self): + ( + config, + input_ids, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + choice_labels, + ) = self.prepare_config_and_inputs() + + config.is_decoder = True + encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) + encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) + + return ( + config, + input_ids, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + choice_labels, + encoder_hidden_states, + encoder_attention_mask, + ) + + def create_and_check_model( + self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels + ): + model = ErnieModel(config=config) + model.to(torch_device) + model.eval() + result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) + result = model(input_ids, token_type_ids=token_type_ids) + result = model(input_ids) + self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) + self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) + + def create_and_check_model_as_decoder( + self, + config, + input_ids, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + choice_labels, + encoder_hidden_states, + encoder_attention_mask, + ): + config.add_cross_attention = True + model = ErnieModel(config) + model.to(torch_device) + model.eval() + result = model( + input_ids, + attention_mask=input_mask, + token_type_ids=token_type_ids, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + ) + result = model( + input_ids, + attention_mask=input_mask, + token_type_ids=token_type_ids, + encoder_hidden_states=encoder_hidden_states, + ) + result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) + self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) + self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) + + def create_and_check_for_causal_lm( + self, + config, + input_ids, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + choice_labels, + encoder_hidden_states, + encoder_attention_mask, + ): + model = ErnieForCausalLM(config=config) + model.to(torch_device) + model.eval() + result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) + + def create_and_check_for_masked_lm( + self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels + ): + model = ErnieForMaskedLM(config=config) + model.to(torch_device) + model.eval() + result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) + + def create_and_check_model_for_causal_lm_as_decoder( + self, + config, + input_ids, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + choice_labels, + encoder_hidden_states, + encoder_attention_mask, + ): + config.add_cross_attention = True + model = ErnieForCausalLM(config=config) + model.to(torch_device) + model.eval() + result = model( + input_ids, + attention_mask=input_mask, + token_type_ids=token_type_ids, + labels=token_labels, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + ) + result = model( + input_ids, + attention_mask=input_mask, + token_type_ids=token_type_ids, + labels=token_labels, + encoder_hidden_states=encoder_hidden_states, + ) + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) + + def create_and_check_decoder_model_past_large_inputs( + self, + config, + input_ids, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + choice_labels, + encoder_hidden_states, + encoder_attention_mask, + ): + config.is_decoder = True + config.add_cross_attention = True + model = ErnieForCausalLM(config=config).to(torch_device).eval() + + # first forward pass + outputs = model( + input_ids, + attention_mask=input_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + use_cache=True, + ) + past_key_values = outputs.past_key_values + + # create hypothetical multiple next token and extent to next_input_ids + next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) + next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) + + # append to next input_ids and + next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) + next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) + + output_from_no_past = model( + next_input_ids, + attention_mask=next_attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_hidden_states=True, + )["hidden_states"][0] + output_from_past = model( + next_tokens, + attention_mask=next_attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + output_hidden_states=True, + )["hidden_states"][0] + + # select random slice + random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() + output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() + output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() + + self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) + + # test that outputs are equal for slice + self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) + + def create_and_check_for_next_sequence_prediction( + self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels + ): + model = ErnieForNextSentencePrediction(config=config) + model.to(torch_device) + model.eval() + result = model( + input_ids, + attention_mask=input_mask, + token_type_ids=token_type_ids, + labels=sequence_labels, + ) + self.parent.assertEqual(result.logits.shape, (self.batch_size, 2)) + + def create_and_check_for_pretraining( + self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels + ): + model = ErnieForPreTraining(config=config) + model.to(torch_device) + model.eval() + result = model( + input_ids, + attention_mask=input_mask, + token_type_ids=token_type_ids, + labels=token_labels, + next_sentence_label=sequence_labels, + ) + self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) + self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2)) + + def create_and_check_for_question_answering( + self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels + ): + model = ErnieForQuestionAnswering(config=config) + model.to(torch_device) + model.eval() + result = model( + input_ids, + attention_mask=input_mask, + token_type_ids=token_type_ids, + start_positions=sequence_labels, + end_positions=sequence_labels, + ) + self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) + self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) + + def create_and_check_for_sequence_classification( + self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels + ): + config.num_labels = self.num_labels + model = ErnieForSequenceClassification(config) + model.to(torch_device) + model.eval() + result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) + + def create_and_check_for_token_classification( + self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels + ): + config.num_labels = self.num_labels + model = ErnieForTokenClassification(config=config) + model.to(torch_device) + model.eval() + result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) + + def create_and_check_for_multiple_choice( + self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels + ): + config.num_choices = self.num_choices + model = ErnieForMultipleChoice(config=config) + model.to(torch_device) + model.eval() + multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() + multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() + multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() + result = model( + multiple_choice_inputs_ids, + attention_mask=multiple_choice_input_mask, + token_type_ids=multiple_choice_token_type_ids, + labels=choice_labels, + ) + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + ( + config, + input_ids, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + choice_labels, + ) = config_and_inputs + inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} + return config, inputs_dict + + +@require_torch +class ErnieModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): + all_model_classes = ( + ( + ErnieModel, + ErnieForCausalLM, + ErnieForMaskedLM, + ErnieForMultipleChoice, + ErnieForNextSentencePrediction, + ErnieForPreTraining, + ErnieForQuestionAnswering, + ErnieForSequenceClassification, + ErnieForTokenClassification, + ) + if is_torch_available() + else () + ) + all_generative_model_classes = (ErnieForCausalLM,) if is_torch_available() else () + fx_compatible = False + + # special case for ForPreTraining model + def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): + inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) + + if return_labels: + if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): + inputs_dict["labels"] = torch.zeros( + (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device + ) + inputs_dict["next_sentence_label"] = torch.zeros( + self.model_tester.batch_size, dtype=torch.long, device=torch_device + ) + return inputs_dict + + def setUp(self): + self.model_tester = ErnieModelTester(self) + self.config_tester = ConfigTester(self, config_class=ErnieConfig, hidden_size=37) + + def test_config(self): + self.config_tester.run_common_tests() + + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + def test_model_various_embeddings(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + for type in ["absolute", "relative_key", "relative_key_query"]: + config_and_inputs[0].position_embedding_type = type + self.model_tester.create_and_check_model(*config_and_inputs) + + def test_model_as_decoder(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() + self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) + + def test_model_as_decoder_with_default_input_mask(self): + # This regression test was failing with PyTorch < 1.3 + ( + config, + input_ids, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + choice_labels, + encoder_hidden_states, + encoder_attention_mask, + ) = self.model_tester.prepare_config_and_inputs_for_decoder() + + input_mask = None + + self.model_tester.create_and_check_model_as_decoder( + config, + input_ids, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + choice_labels, + encoder_hidden_states, + encoder_attention_mask, + ) + + def test_for_causal_lm(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() + self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) + + def test_for_masked_lm(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) + + def test_for_causal_lm_decoder(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() + self.model_tester.create_and_check_model_for_causal_lm_as_decoder(*config_and_inputs) + + def test_decoder_model_past_with_large_inputs(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() + self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) + + def test_for_multiple_choice(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) + + def test_for_next_sequence_prediction(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs) + + def test_for_pretraining(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_for_pretraining(*config_and_inputs) + + def test_for_question_answering(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_for_question_answering(*config_and_inputs) + + def test_for_sequence_classification(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) + + def test_for_token_classification(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_for_token_classification(*config_and_inputs) + + @slow + def test_model_from_pretrained(self): + for model_name in ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: + model = ErnieModel.from_pretrained(model_name) + self.assertIsNotNone(model) + + @slow + @require_torch_gpu + def test_torchscript_device_change(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + for model_class in self.all_model_classes: + # ErnieForMultipleChoice behaves incorrectly in JIT environments. + if model_class == ErnieForMultipleChoice: + return + + config.torchscript = True + model = model_class(config=config) + + inputs_dict = self._prepare_for_class(inputs_dict, model_class) + traced_model = torch.jit.trace( + model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu")) + ) + + with tempfile.TemporaryDirectory() as tmp: + torch.jit.save(traced_model, os.path.join(tmp, "ernie.pt")) + loaded = torch.jit.load(os.path.join(tmp, "ernie.pt"), map_location=torch_device) + loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device)) From e6f221c8d4829c9a3bca699c18a32043ab21f7a0 Mon Sep 17 00:00:00 2001 From: Sanchit Gandhi <93869735+sanchit-gandhi@users.noreply.github.com> Date: Fri, 9 Sep 2022 14:18:56 +0100 Subject: [PATCH 247/539] [JAX] Replace all jax.tree_* calls with jax.tree_util.tree_* (#18361) * [JAX] Replace all jax.tree_* calls with jax.tree_util.tree_* * fix double tree_util --- .../run_image_captioning_flax.py | 4 ++-- .../flax/language-modeling/run_clm_flax.py | 4 ++-- .../flax/language-modeling/run_mlm_flax.py | 10 +++++----- .../flax/language-modeling/run_t5_mlm_flax.py | 6 +++--- .../summarization/run_summarization_flax.py | 6 +++--- .../flax/vision/run_image_classification.py | 4 ++-- .../jax-projects/big_bird/bigbird_flax.py | 2 +- .../dataset-streaming/run_mlm_flax_stream.py | 6 +++--- .../hybrid_clip/run_hybrid_clip.py | 2 +- .../jax-projects/model_parallel/run_clm_mp.py | 8 ++++---- .../wav2vec2/run_wav2vec2_pretrain_flax.py | 4 ++-- .../performer/run_mlm_performer.py | 4 ++-- src/transformers/generation_flax_utils.py | 6 +++--- .../modeling_flax_pytorch_utils.py | 4 ++-- src/transformers/modeling_flax_utils.py | 6 +++--- .../convert_owlvit_original_flax_to_hf.py | 4 ++-- tests/test_modeling_flax_common.py | 18 +++++++++--------- 17 files changed, 49 insertions(+), 49 deletions(-) diff --git a/examples/flax/image-captioning/run_image_captioning_flax.py b/examples/flax/image-captioning/run_image_captioning_flax.py index 348a719857830a..5b3fd187f04179 100644 --- a/examples/flax/image-captioning/run_image_captioning_flax.py +++ b/examples/flax/image-captioning/run_image_captioning_flax.py @@ -1011,7 +1011,7 @@ def save_ckpt(ckpt_dir: str, commit_msg: str = ""): # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: - params = jax.device_get(jax.tree_map(lambda x: x[0], state.params)) + params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params)) model.save_pretrained(os.path.join(training_args.output_dir, ckpt_dir), params=params) tokenizer.save_pretrained(os.path.join(training_args.output_dir, ckpt_dir)) if training_args.push_to_hub: @@ -1064,7 +1064,7 @@ def evaluation_loop( if metrics: # normalize metrics metrics = get_metrics(metrics) - metrics = jax.tree_map(jnp.mean, metrics) + metrics = jax.tree_util.tree_map(jnp.mean, metrics) # compute ROUGE metrics generations = [] diff --git a/examples/flax/language-modeling/run_clm_flax.py b/examples/flax/language-modeling/run_clm_flax.py index 1a0428fdd67039..7e0d1010c14cf2 100755 --- a/examples/flax/language-modeling/run_clm_flax.py +++ b/examples/flax/language-modeling/run_clm_flax.py @@ -781,7 +781,7 @@ def eval_step(params, batch): # normalize eval metrics eval_metrics = get_metrics(eval_metrics) - eval_metrics = jax.tree_map(jnp.mean, eval_metrics) + eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics) try: eval_metrics["perplexity"] = math.exp(eval_metrics["loss"]) @@ -824,7 +824,7 @@ def eval_step(params, batch): # normalize eval metrics eval_metrics = get_metrics(eval_metrics) - eval_metrics = jax.tree_map(lambda x: jnp.mean(x).item(), eval_metrics) + eval_metrics = jax.tree_util.tree_map(lambda x: jnp.mean(x).item(), eval_metrics) try: eval_metrics["perplexity"] = math.exp(eval_metrics["loss"]) diff --git a/examples/flax/language-modeling/run_mlm_flax.py b/examples/flax/language-modeling/run_mlm_flax.py index 408e09fc111cb3..5e1519bbd5f215 100755 --- a/examples/flax/language-modeling/run_mlm_flax.py +++ b/examples/flax/language-modeling/run_mlm_flax.py @@ -827,9 +827,9 @@ def eval_step(params, batch): # normalize eval metrics eval_metrics = get_metrics(eval_metrics) - eval_metrics = jax.tree_map(jnp.sum, eval_metrics) + eval_metrics = jax.tree_util.tree_map(jnp.sum, eval_metrics) eval_normalizer = eval_metrics.pop("normalizer") - eval_metrics = jax.tree_map(lambda x: x / eval_normalizer, eval_metrics) + eval_metrics = jax.tree_util.tree_map(lambda x: x / eval_normalizer, eval_metrics) # Update progress bar epochs.desc = f"Step... ({cur_step} | Loss: {eval_metrics['loss']}, Acc: {eval_metrics['accuracy']})" @@ -841,7 +841,7 @@ def eval_step(params, batch): if cur_step % training_args.save_steps == 0 and cur_step > 0: # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: - params = jax.device_get(jax.tree_map(lambda x: x[0], state.params)) + params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params)) model.save_pretrained(training_args.output_dir, params=params) tokenizer.save_pretrained(training_args.output_dir) if training_args.push_to_hub: @@ -867,9 +867,9 @@ def eval_step(params, batch): # normalize eval metrics eval_metrics = get_metrics(eval_metrics) - eval_metrics = jax.tree_map(lambda metric: jnp.sum(metric).item(), eval_metrics) + eval_metrics = jax.tree_util.tree_map(lambda metric: jnp.sum(metric).item(), eval_metrics) eval_normalizer = eval_metrics.pop("normalizer") - eval_metrics = jax.tree_map(lambda x: x / eval_normalizer, eval_metrics) + eval_metrics = jax.tree_util.tree_map(lambda x: x / eval_normalizer, eval_metrics) try: perplexity = math.exp(eval_metrics["loss"]) diff --git a/examples/flax/language-modeling/run_t5_mlm_flax.py b/examples/flax/language-modeling/run_t5_mlm_flax.py index 0030fc8da66a57..c9d748de3d5c09 100755 --- a/examples/flax/language-modeling/run_t5_mlm_flax.py +++ b/examples/flax/language-modeling/run_t5_mlm_flax.py @@ -940,7 +940,7 @@ def eval_step(params, batch): # get eval metrics eval_metrics = get_metrics(eval_metrics) - eval_metrics = jax.tree_map(jnp.mean, eval_metrics) + eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics) # Update progress bar epochs.write(f"Step... ({cur_step} | Loss: {eval_metrics['loss']}, Acc: {eval_metrics['accuracy']})") @@ -952,7 +952,7 @@ def eval_step(params, batch): if cur_step % training_args.save_steps == 0 and cur_step > 0: # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: - params = jax.device_get(jax.tree_map(lambda x: x[0], state.params)) + params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params)) model.save_pretrained(training_args.output_dir, params=params) tokenizer.save_pretrained(training_args.output_dir) if training_args.push_to_hub: @@ -978,7 +978,7 @@ def eval_step(params, batch): # get eval metrics eval_metrics = get_metrics(eval_metrics) - eval_metrics = jax.tree_map(lambda metric: jnp.mean(metric).item(), eval_metrics) + eval_metrics = jax.tree_util.tree_map(lambda metric: jnp.mean(metric).item(), eval_metrics) if jax.process_index() == 0: eval_metrics = {f"eval_{metric_name}": value for metric_name, value in eval_metrics.items()} diff --git a/examples/flax/summarization/run_summarization_flax.py b/examples/flax/summarization/run_summarization_flax.py index 2813c88a3bd6fd..ed151b8bbe031d 100644 --- a/examples/flax/summarization/run_summarization_flax.py +++ b/examples/flax/summarization/run_summarization_flax.py @@ -902,7 +902,7 @@ def generate_step(params, batch): # normalize eval metrics eval_metrics = get_metrics(eval_metrics) - eval_metrics = jax.tree_map(jnp.mean, eval_metrics) + eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics) # compute ROUGE metrics rouge_desc = "" @@ -923,7 +923,7 @@ def generate_step(params, batch): # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: - params = jax.device_get(jax.tree_map(lambda x: x[0], state.params)) + params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params)) model.save_pretrained(training_args.output_dir, params=params) tokenizer.save_pretrained(training_args.output_dir) if training_args.push_to_hub: @@ -957,7 +957,7 @@ def generate_step(params, batch): # normalize prediction metrics pred_metrics = get_metrics(pred_metrics) - pred_metrics = jax.tree_map(jnp.mean, pred_metrics) + pred_metrics = jax.tree_util.tree_map(jnp.mean, pred_metrics) # compute ROUGE metrics rouge_desc = "" diff --git a/examples/flax/vision/run_image_classification.py b/examples/flax/vision/run_image_classification.py index 3de3c977ab1d46..22065438d2ac73 100644 --- a/examples/flax/vision/run_image_classification.py +++ b/examples/flax/vision/run_image_classification.py @@ -542,7 +542,7 @@ def eval_step(params, batch): # normalize eval metrics eval_metrics = get_metrics(eval_metrics) - eval_metrics = jax.tree_map(jnp.mean, eval_metrics) + eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics) # Print metrics and update progress bar eval_step_progress_bar.close() @@ -560,7 +560,7 @@ def eval_step(params, batch): # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: - params = jax.device_get(jax.tree_map(lambda x: x[0], state.params)) + params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params)) model.save_pretrained(training_args.output_dir, params=params) if training_args.push_to_hub: repo.push_to_hub(commit_message=f"Saving weights and logs of epoch {epoch}", blocking=False) diff --git a/examples/research_projects/jax-projects/big_bird/bigbird_flax.py b/examples/research_projects/jax-projects/big_bird/bigbird_flax.py index d27212547219f2..b9ff9da28140a4 100644 --- a/examples/research_projects/jax-projects/big_bird/bigbird_flax.py +++ b/examples/research_projects/jax-projects/big_bird/bigbird_flax.py @@ -104,7 +104,7 @@ class DataCollator: def __call__(self, batch): batch = self.collate_fn(batch) - batch = jax.tree_map(shard, batch) + batch = jax.tree_util.tree_map(shard, batch) return batch def collate_fn(self, features): diff --git a/examples/research_projects/jax-projects/dataset-streaming/run_mlm_flax_stream.py b/examples/research_projects/jax-projects/dataset-streaming/run_mlm_flax_stream.py index fadcec09cbf0e1..e6bbdbee8cb51e 100755 --- a/examples/research_projects/jax-projects/dataset-streaming/run_mlm_flax_stream.py +++ b/examples/research_projects/jax-projects/dataset-streaming/run_mlm_flax_stream.py @@ -608,9 +608,9 @@ def eval_step(params, batch): # normalize eval metrics eval_metrics = get_metrics(eval_metrics) - eval_metrics = jax.tree_map(jnp.sum, eval_metrics) + eval_metrics = jax.tree_util.tree_map(jnp.sum, eval_metrics) eval_normalizer = eval_metrics.pop("normalizer") - eval_metrics = jax.tree_map(lambda x: x / eval_normalizer, eval_metrics) + eval_metrics = jax.tree_util.tree_map(lambda x: x / eval_normalizer, eval_metrics) # Update progress bar steps.desc = ( @@ -624,7 +624,7 @@ def eval_step(params, batch): # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: - params = jax.device_get(jax.tree_map(lambda x: x[0], state.params)) + params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params)) model.save_pretrained( training_args.output_dir, params=params, diff --git a/examples/research_projects/jax-projects/hybrid_clip/run_hybrid_clip.py b/examples/research_projects/jax-projects/hybrid_clip/run_hybrid_clip.py index 6ee974666a291a..1be46f6af99368 100644 --- a/examples/research_projects/jax-projects/hybrid_clip/run_hybrid_clip.py +++ b/examples/research_projects/jax-projects/hybrid_clip/run_hybrid_clip.py @@ -551,7 +551,7 @@ def eval_step(params, batch): # normalize eval metrics eval_metrics = get_metrics(eval_metrics) - eval_metrics = jax.tree_map(jnp.mean, eval_metrics) + eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics) # Print metrics and update progress bar eval_step_progress_bar.close() diff --git a/examples/research_projects/jax-projects/model_parallel/run_clm_mp.py b/examples/research_projects/jax-projects/model_parallel/run_clm_mp.py index 518ef9f7b22f3e..16eb1007b4c73b 100644 --- a/examples/research_projects/jax-projects/model_parallel/run_clm_mp.py +++ b/examples/research_projects/jax-projects/model_parallel/run_clm_mp.py @@ -481,7 +481,7 @@ def get_initial_state(params): param_spec = set_partitions(unfreeze(model.params)) # Get the PyTree for opt_state, we don't actually initialize the opt_state yet. - params_shapes = jax.tree_map(lambda x: x.shape, model.params) + params_shapes = jax.tree_util.tree_map(lambda x: x.shape, model.params) state_shapes = jax.eval_shape(get_initial_state, params_shapes) # get PartitionSpec for opt_state, this is very specific to adamw @@ -492,7 +492,7 @@ def get_opt_spec(x): return param_spec return None - opt_state_spec, param_spec = jax.tree_map( + opt_state_spec, param_spec = jax.tree_util.tree_map( get_opt_spec, state_shapes, is_leaf=lambda x: isinstance(x, (dict, optax.EmptyState)) ) @@ -506,7 +506,7 @@ def get_opt_spec(x): # hack: move the inital params to CPU to free up device memory # TODO: allow loading weights on CPU in pre-trained model - model.params = jax.tree_map(lambda x: np.asarray(x), model.params) + model.params = jax.tree_util.tree_map(lambda x: np.asarray(x), model.params) # mesh defination mesh_devices = np.array(jax.devices()).reshape(1, jax.local_device_count()) @@ -636,7 +636,7 @@ def eval_step(input_ids, labels, params): # normalize eval metrics eval_metrics = stack_forest(eval_metrics) - eval_metrics = jax.tree_map(jnp.mean, eval_metrics) + eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics) try: eval_metrics["perplexity"] = math.exp(eval_metrics["loss"]) diff --git a/examples/research_projects/jax-projects/wav2vec2/run_wav2vec2_pretrain_flax.py b/examples/research_projects/jax-projects/wav2vec2/run_wav2vec2_pretrain_flax.py index 457c58d44fde5a..71bf60d2c6027d 100755 --- a/examples/research_projects/jax-projects/wav2vec2/run_wav2vec2_pretrain_flax.py +++ b/examples/research_projects/jax-projects/wav2vec2/run_wav2vec2_pretrain_flax.py @@ -591,7 +591,7 @@ def eval_step(params, batch): # get eval metrics eval_metrics = get_metrics(eval_metrics) - eval_metrics = jax.tree_map(jnp.mean, eval_metrics) + eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics) # Update progress bar epochs.write( @@ -606,7 +606,7 @@ def eval_step(params, batch): # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: - params = jax.device_get(jax.tree_map(lambda x: x[0], state.params)) + params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params)) model.save_pretrained(training_args.output_dir, params=params, push_to_hub=training_args.push_to_hub) diff --git a/examples/research_projects/performer/run_mlm_performer.py b/examples/research_projects/performer/run_mlm_performer.py index 8e8fe917653e05..35de233f727ea4 100644 --- a/examples/research_projects/performer/run_mlm_performer.py +++ b/examples/research_projects/performer/run_mlm_performer.py @@ -674,9 +674,9 @@ def tokenize_function(examples): eval_metrics.append(metrics) eval_metrics_np = get_metrics(eval_metrics) - eval_metrics_np = jax.tree_map(jnp.sum, eval_metrics_np) + eval_metrics_np = jax.tree_util.tree_map(jnp.sum, eval_metrics_np) eval_normalizer = eval_metrics_np.pop("normalizer") - eval_summary = jax.tree_map(lambda x: x / eval_normalizer, eval_metrics_np) + eval_summary = jax.tree_util.tree_map(lambda x: x / eval_normalizer, eval_metrics_np) # Update progress bar epochs.desc = ( diff --git a/src/transformers/generation_flax_utils.py b/src/transformers/generation_flax_utils.py index 1c052aae7bafb6..353df6fdbba636 100644 --- a/src/transformers/generation_flax_utils.py +++ b/src/transformers/generation_flax_utils.py @@ -699,7 +699,7 @@ def gather_fn(tensor): else: return tensor[batch_indices, beam_indices] - return jax.tree_map(gather_fn, nested) + return jax.tree_util.tree_map(gather_fn, nested) # init values max_length = max_length if max_length is not None else self.config.max_length @@ -788,7 +788,7 @@ def beam_search_body_fn(state, input_ids_length=1): model_outputs = model(input_token, params=params, **state.model_kwargs) logits = unflatten_beam_dim(model_outputs.logits[:, -1], batch_size, num_beams) - cache = jax.tree_map( + cache = jax.tree_util.tree_map( lambda tensor: unflatten_beam_dim(tensor, batch_size, num_beams), model_outputs.past_key_values ) @@ -874,7 +874,7 @@ def beam_search_body_fn(state, input_ids_length=1): # With these, gather the top k beam-associated caches. next_running_indices = gather_beams(topk_beam_indices, next_topk_indices, batch_size, num_beams) next_cache = gather_beams(cache, next_running_indices, batch_size, num_beams) - model_outputs["past_key_values"] = jax.tree_map(lambda x: flatten_beam_dim(x), next_cache) + model_outputs["past_key_values"] = jax.tree_util.tree_map(lambda x: flatten_beam_dim(x), next_cache) next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs) return BeamSearchState( diff --git a/src/transformers/modeling_flax_pytorch_utils.py b/src/transformers/modeling_flax_pytorch_utils.py index 76eaa53f89d04c..47da8c2871b321 100644 --- a/src/transformers/modeling_flax_pytorch_utils.py +++ b/src/transformers/modeling_flax_pytorch_utils.py @@ -253,7 +253,7 @@ def load_flax_weights_in_pytorch_model(pt_model, flax_state): raise # check if we have bf16 weights - is_type_bf16 = flatten_dict(jax.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values() + is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values() if any(is_type_bf16): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. @@ -261,7 +261,7 @@ def load_flax_weights_in_pytorch_model(pt_model, flax_state): "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` " "before loading those in PyTorch model." ) - flax_state = jax.tree_map( + flax_state = jax.tree_util.tree_map( lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state ) diff --git a/src/transformers/modeling_flax_utils.py b/src/transformers/modeling_flax_utils.py index 00bb5480ffe3e9..b19f3db77e1900 100644 --- a/src/transformers/modeling_flax_utils.py +++ b/src/transformers/modeling_flax_utils.py @@ -303,10 +303,10 @@ def conditional_cast(param): return param if mask is None: - return jax.tree_map(conditional_cast, params) + return jax.tree_util.tree_map(conditional_cast, params) flat_params = flatten_dict(params) - flat_mask, _ = jax.tree_flatten(mask) + flat_mask, _ = jax.tree_util.tree_flatten(mask) for masked, key in zip(flat_mask, flat_params.keys()): if masked: @@ -900,7 +900,7 @@ def from_pretrained( ) # dictionary of key: dtypes for the model params - param_dtypes = jax.tree_map(lambda x: x.dtype, state) + param_dtypes = jax.tree_util.tree_map(lambda x: x.dtype, state) # extract keys of parameters not in jnp.float32 fp16_params = [k for k in param_dtypes if param_dtypes[k] == jnp.float16] bf16_params = [k for k in param_dtypes if param_dtypes[k] == jnp.bfloat16] diff --git a/src/transformers/models/owlvit/convert_owlvit_original_flax_to_hf.py b/src/transformers/models/owlvit/convert_owlvit_original_flax_to_hf.py index dde57c168adeab..09942fa3928d0b 100644 --- a/src/transformers/models/owlvit/convert_owlvit_original_flax_to_hf.py +++ b/src/transformers/models/owlvit/convert_owlvit_original_flax_to_hf.py @@ -90,7 +90,7 @@ def flatten_nested_dict(params, parent_key="", sep="/"): def to_f32(params): - return jax.tree_map(lambda x: x.astype(jnp.float32) if x.dtype == jnp.bfloat16 else x, params) + return jax.tree_util.tree_map(lambda x: x.astype(jnp.float32) if x.dtype == jnp.bfloat16 else x, params) def copy_attn_layer(hf_attn_layer, pt_attn_layer): @@ -398,7 +398,7 @@ def convert_owlvit_checkpoint(pt_backbone, flax_params, attn_params, pytorch_dum # Load from checkpoint and convert params to float-32 variables = checkpoints.restore_checkpoint(args.owlvit_checkpoint, target=None)["optimizer"]["target"] - flax_params = jax.tree_map(lambda x: x.astype(jnp.float32) if x.dtype == jnp.bfloat16 else x, variables) + flax_params = jax.tree_util.tree_map(lambda x: x.astype(jnp.float32) if x.dtype == jnp.bfloat16 else x, variables) del variables # Convert CLIP backbone diff --git a/tests/test_modeling_flax_common.py b/tests/test_modeling_flax_common.py index 837f874889ae7d..37171e21388cf3 100644 --- a/tests/test_modeling_flax_common.py +++ b/tests/test_modeling_flax_common.py @@ -776,7 +776,7 @@ def test_default_params_dtype(self): for model_class in self.all_model_classes: # check if all params are still in float32 when dtype of computation is half-precision model = model_class(config, dtype=jnp.float16) - types = jax.tree_map(lambda x: x.dtype, model.params) + types = jax.tree_util.tree_map(lambda x: x.dtype, model.params) types = flatten_dict(types) for name, type_ in types.items(): @@ -790,7 +790,7 @@ def test_to_bf16(self): # cast all params to bf16 params = model.to_bf16(model.params) - types = flatten_dict(jax.tree_map(lambda x: x.dtype, params)) + types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) # test if all params are in bf16 for name, type_ in types.items(): self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.") @@ -802,7 +802,7 @@ def test_to_bf16(self): mask = unflatten_dict(mask) params = model.to_bf16(model.params, mask) - types = flatten_dict(jax.tree_map(lambda x: x.dtype, params)) + types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) # test if all params are in bf16 except key for name, type_ in types.items(): if name == key: @@ -818,7 +818,7 @@ def test_to_fp16(self): # cast all params to fp16 params = model.to_fp16(model.params) - types = flatten_dict(jax.tree_map(lambda x: x.dtype, params)) + types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) # test if all params are in fp16 for name, type_ in types.items(): self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.") @@ -830,7 +830,7 @@ def test_to_fp16(self): mask = unflatten_dict(mask) params = model.to_fp16(model.params, mask) - types = flatten_dict(jax.tree_map(lambda x: x.dtype, params)) + types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) # test if all params are in fp16 except key for name, type_ in types.items(): if name == key: @@ -849,7 +849,7 @@ def test_to_fp32(self): params = model.to_fp32(params) # test if all params are in fp32 - types = flatten_dict(jax.tree_map(lambda x: x.dtype, params)) + types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) for name, type_ in types.items(): self.assertEqual(type_, jnp.float32, msg=f"param {name} is not in fp32.") @@ -864,7 +864,7 @@ def test_to_fp32(self): params = model.to_fp32(params, mask) # test if all params are in fp32 except key - types = flatten_dict(jax.tree_map(lambda x: x.dtype, params)) + types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) for name, type_ in types.items(): if name == key: self.assertEqual(type_, jnp.float16, msg=f"param {name} should be in fp16.") @@ -884,7 +884,7 @@ def test_save_load_in_fp16(self): # load the weights again and check if they are still in fp16 model = model_class.from_pretrained(tmpdirname) - types = flatten_dict(jax.tree_map(lambda x: x.dtype, model.params)) + types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, model.params)) for name, type_ in types.items(): self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.") @@ -901,7 +901,7 @@ def test_save_load_in_bf16(self): # load the weights again and check if they are still in fp16 model = model_class.from_pretrained(tmpdirname) - types = flatten_dict(jax.tree_map(lambda x: x.dtype, model.params)) + types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, model.params)) for name, type_ in types.items(): self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.") From 85125fcffdf0b0e7d2b0fb8f04cc6fa606f1efe6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Jankowski?= Date: Fri, 9 Sep 2022 17:37:34 +0200 Subject: [PATCH 248/539] Neptune.ai integration improvements (#18934) * NeptuneCallback improvements * After review suggestions and deduplication of initial run * Added volatile checkpoints support due to missing post-rebase commit * Update README per review comments - Remove list formatting - Correct Neptune docs link Co-authored-by: Sabine --- docs/source/en/main_classes/callback.mdx | 3 + examples/pytorch/README.md | 84 ++++++ src/transformers/__init__.py | 2 + src/transformers/integrations.py | 324 +++++++++++++++++++---- src/transformers/training_args.py | 4 +- 5 files changed, 359 insertions(+), 58 deletions(-) diff --git a/docs/source/en/main_classes/callback.mdx b/docs/source/en/main_classes/callback.mdx index 1d7d0b03d23253..7c5b48f5d49130 100644 --- a/docs/source/en/main_classes/callback.mdx +++ b/docs/source/en/main_classes/callback.mdx @@ -32,6 +32,7 @@ By default a [`Trainer`] will use the following callbacks: - [`~integrations.WandbCallback`] if [wandb](https://www.wandb.com/) is installed. - [`~integrations.CometCallback`] if [comet_ml](https://www.comet.ml/site/) is installed. - [`~integrations.MLflowCallback`] if [mlflow](https://www.mlflow.org/) is installed. +- [`~integrations.NeptuneCallback`] if [neptune](https://neptune.ai/) is installed. - [`~integrations.AzureMLCallback`] if [azureml-sdk](https://pypi.org/project/azureml-sdk/) is installed. - [`~integrations.CodeCarbonCallback`] if [codecarbon](https://pypi.org/project/codecarbon/) is @@ -70,6 +71,8 @@ Here is the list of the available [`TrainerCallback`] in the library: [[autodoc]] integrations.CodeCarbonCallback +[[autodoc]] integrations.NeptuneCallback + ## TrainerCallback [[autodoc]] TrainerCallback diff --git a/examples/pytorch/README.md b/examples/pytorch/README.md index 442511ead93a7a..b0b099bae2ba88 100644 --- a/examples/pytorch/README.md +++ b/examples/pytorch/README.md @@ -198,6 +198,7 @@ You can easily log and monitor your runs code. The following are currently suppo * [TensorBoard](https://www.tensorflow.org/tensorboard) * [Weights & Biases](https://docs.wandb.ai/integrations/huggingface) * [Comet ML](https://www.comet.ml/docs/python-sdk/huggingface/) +* [Neptune](https://docs.neptune.ai/integrations-and-supported-tools/model-training/hugging-face) ### Weights & Biases @@ -251,3 +252,86 @@ or if in a Conda environment: ```bash conda install -c comet_ml -c anaconda -c conda-forge comet_ml ``` + +### Neptune + +First, install the Neptune client library. You can do it with either `pip` or `conda`: + +`pip`: + +```bash +pip install neptune-client +``` + +`conda`: + +```bash +conda install -c conda-forge neptune-client +``` + +Next, in your model training script, import `NeptuneCallback`: + +```python +from transformers.integrations import NeptuneCallback +``` + +To enable Neptune logging, in your `TrainingArguments`, set the `report_to` argument to `"neptune"`: + +```python +training_args = TrainingArguments( + "quick-training-distilbert-mrpc", + evaluation_strategy="steps", + eval_steps = 20, + report_to = "neptune", +) + +trainer = Trainer( + model, + training_args, + ... +) +``` + +Alternatively, for more logging options, create a Neptune callback: + +```python +neptune_callback = NeptuneCallback() +``` + +To add more detail to the tracked run, you can supply optional arguments to `NeptuneCallback`. + +Some examples: + +```python +neptune_callback = NeptuneCallback( + name = "DistilBERT", + description = "DistilBERT fine-tuned on GLUE/MRPC", + tags = ["args-callback", "fine-tune", "MRPC"], # tags help you manage runs in Neptune + base_namespace="callback", # the default is "finetuning" + log_checkpoints = "best", # other options are "last", "same", and None + capture_hardware_metrics = False, # additional keyword arguments for a Neptune run +) +``` + +Pass the callback to the Trainer: + +```python +training_args = TrainingArguments(..., report_to = None) +trainer = Trainer( + model, + training_args, + ... + callbacks=[neptune_callback], +) +``` + +Now, when you start the training with `trainer.train()`, your metadata will be logged in Neptune. + +**Note:** Although you can pass your **Neptune API token** and **project name** as arguments when creating the callback, the recommended way is to save them as environment variables: + +| Environment variable | Value | +| :------------------- | :--------------------------------------------------- | +| `NEPTUNE_API_TOKEN` | Your Neptune API token. To find and copy it, click your Neptune avatar and select **Get your API token**. | +| `NEPTUNE_PROJECT` | The full name of your Neptune project (`workspace-name/project-name`). To find and copy it, head to **project settings** → **Properties**. | + +For detailed instructions and examples, see the [Neptune docs](https://docs.neptune.ai/integrations-and-supported-tools/model-training/hugging-face). diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 3bea1e4ad40b25..ee23c79db6b487 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -100,6 +100,7 @@ "hf_argparser": ["HfArgumentParser"], "integrations": [ "is_comet_available", + "is_neptune_available", "is_optuna_available", "is_ray_available", "is_ray_tune_available", @@ -2986,6 +2987,7 @@ # Integrations from .integrations import ( is_comet_available, + is_neptune_available, is_optuna_available, is_ray_available, is_ray_tune_available, diff --git a/src/transformers/integrations.py b/src/transformers/integrations.py index 42569d6e222082..89493f779087b0 100644 --- a/src/transformers/integrations.py +++ b/src/transformers/integrations.py @@ -19,10 +19,15 @@ import json import numbers import os +import shutil import sys import tempfile from pathlib import Path +from typing import TYPE_CHECKING, Dict, Optional +import numpy as np + +from . import __version__ as version from .utils import flatten_dict, is_datasets_available, logging @@ -44,6 +49,10 @@ except (ImportError, ValueError): _has_comet = False +_has_neptune = importlib.util.find_spec("neptune") is not None +if TYPE_CHECKING and _has_neptune: + from neptune.new.metadata_containers.run import Run + from .trainer_callback import ProgressCallback, TrainerCallback # noqa: E402 from .trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy # noqa: E402 from .utils import ENV_VARS_TRUE_VALUES, is_torch_tpu_available # noqa: E402 @@ -106,7 +115,7 @@ def is_fairscale_available(): def is_neptune_available(): - return importlib.util.find_spec("neptune") is not None + return _has_neptune def is_codecarbon_available(): @@ -449,6 +458,8 @@ def get_available_reporting_integrations(): integrations.append("comet_ml") if is_mlflow_available(): integrations.append("mlflow") + if is_neptune_available(): + integrations.append("neptune") if is_tensorboard_available(): integrations.append("tensorboard") if is_wandb_available(): @@ -925,75 +936,276 @@ def __del__(self): self._ml_flow.end_run() +class NeptuneMissingConfiguration(Exception): + def __init__(self): + super().__init__( + """ + ------ Unsupported ---- We were not able to create new runs. You provided a custom Neptune run to + `NeptuneCallback` with the `run` argument. For the integration to work fully, provide your `api_token` and + `project` by saving them as environment variables or passing them to the callback. + """ + ) + + class NeptuneCallback(TrainerCallback): - """ - A [`TrainerCallback`] that sends the logs to [Neptune](https://neptune.ai). + """TrainerCallback that sends the logs to [Neptune](https://neptune.ai). + + Args: + api_token (`str`, optional): + Neptune API token obtained upon registration. You can leave this argument out if you have saved your token + to the `NEPTUNE_API_TOKEN` environment variable (strongly recommended). See full setup instructions in the + [docs](https://docs.neptune.ai/getting-started/installation). + project (`str`, optional): + Name of an existing Neptune project, in the form: "workspace-name/project-name". You can find and copy the + name from the project Settings -> Properties in Neptune. If None (default), the value of the + `NEPTUNE_PROJECT` environment variable will be used. + name (`str`, optional): Custom name for the run. + base_namespace (`str`, optional, defaults to "finetuning"): In the Neptune run, the root namespace + that will contain all of the logged metadata. + log_parameters (`bool`, optional, defaults to True): + If True, logs all Trainer arguments and model parameters provided by the Trainer. + log_checkpoints (`str`, optional, defaults to None): + If "same", uploads checkpoints whenever they are saved by the Trainer. If "last", uploads only the most + recently saved checkpoint. If "best", uploads the best checkpoint (among the ones saved by the Trainer). If + None, does not upload checkpoints. + run (`Run`, optional): + Pass a Neptune run object if you want to continue logging to an existing run. Read more about resuming runs + in the [docs](https://docs.neptune.ai/how-to-guides/neptune-api/resume-run). + **neptune_run_kwargs (optional): + Additional keyword arguments to be passed directly to the + [neptune.init_run()](https://docs.neptune.ai/api-reference/neptune#.init_run) function when a new run is + created. """ - def __init__(self): + integration_version_key = "source_code/integrations/transformers" + model_parameters_key = "model_parameters" + trial_name_key = "trial" + trial_params_key = "trial_params" + trainer_parameters_key = "trainer_parameters" + flat_metrics = {"train/epoch"} + + def __init__( + self, + *, + api_token: Optional[str] = None, + project: Optional[str] = None, + name: Optional[str] = None, + base_namespace: str = "finetuning", + run: Optional["Run"] = None, + log_parameters: bool = True, + log_checkpoints: Optional[str] = None, + **neptune_run_kwargs + ): if not is_neptune_available(): raise ValueError( - "NeptuneCallback requires neptune-client to be installed. Run `pip install neptune-client`." + "NeptuneCallback requires the Neptune client library to be installed. " + "To install the library, run `pip install neptune-client`." ) - import neptune.new as neptune - self._neptune = neptune - self._initialized = False - self._log_artifacts = False + from neptune.new.metadata_containers.run import Run - def setup(self, args, state, model): - """ - Setup the Neptune integration. + try: + from neptune.new.integrations.utils import verify_type + except ImportError: + from neptune.new.internal.utils import verify_type + + verify_type("api_token", api_token, (str, type(None))) + verify_type("project", project, (str, type(None))) + verify_type("name", name, (str, type(None))) + verify_type("base_namespace", base_namespace, str) + verify_type("run", run, (Run, type(None))) + verify_type("log_parameters", log_parameters, bool) + verify_type("log_checkpoints", log_checkpoints, (str, type(None))) + + self._base_namespace_path = base_namespace + self._log_parameters = log_parameters + self._log_checkpoints = log_checkpoints + self._initial_run: Optional[Run] = run + + self._run = None + self._is_monitoring_run = False + self._run_id = None + self._force_reset_monitoring_run = False + self._init_run_kwargs = {"api_token": api_token, "project": project, "name": name, **neptune_run_kwargs} + + self._volatile_checkpoints_dir = None + self._should_upload_checkpoint = self._log_checkpoints is not None + self._recent_checkpoint_path = None + + if self._log_checkpoints in {"last", "best"}: + self._target_checkpoints_namespace = f"checkpoints/{self._log_checkpoints}" + self._should_clean_recently_uploaded_checkpoint = True + else: + self._target_checkpoints_namespace = "checkpoints" + self._should_clean_recently_uploaded_checkpoint = False - Environment: - NEPTUNE_PROJECT (`str`, *required*): - The project ID for neptune.ai account. Should be in format *workspace_name/project_name* - NEPTUNE_API_TOKEN (`str`, *required*): - API-token for neptune.ai account - NEPTUNE_CONNECTION_MODE (`str`, *optional*): - Neptune connection mode. *async* by default - NEPTUNE_RUN_NAME (`str`, *optional*): - The name of run process on Neptune dashboard - """ - if state.is_world_process_zero: - self._neptune_run = self._neptune.init( - project=os.getenv("NEPTUNE_PROJECT"), - api_token=os.getenv("NEPTUNE_API_TOKEN"), - mode=os.getenv("NEPTUNE_CONNECTION_MODE", "async"), - name=os.getenv("NEPTUNE_RUN_NAME", None), - run=os.getenv("NEPTUNE_RUN_ID", None), - ) - combined_dict = args.to_dict() - if hasattr(model, "config") and model.config is not None: - model_config = model.config.to_dict() - combined_dict = {**model_config, **combined_dict} - self._neptune_run["parameters"] = combined_dict - self._initialized = True + def _stop_run_if_exists(self): + if self._run: + self._run.stop() + del self._run + self._run = None + + def _initialize_run(self, **additional_neptune_kwargs): + from neptune.new import init_run + from neptune.new.exceptions import NeptuneMissingApiTokenException, NeptuneMissingProjectNameException + + self._stop_run_if_exists() + + try: + self._run = init_run(**self._init_run_kwargs, **additional_neptune_kwargs) + self._run_id = self._run["sys/id"].fetch() + except (NeptuneMissingProjectNameException, NeptuneMissingApiTokenException) as e: + raise NeptuneMissingConfiguration() from e + + def _use_initial_run(self): + self._run = self._initial_run + self._is_monitoring_run = True + self._run_id = self._run["sys/id"].fetch() + self._initial_run = None + + def _ensure_run_with_monitoring(self): + if self._initial_run is not None: + self._use_initial_run() + else: + if not self._force_reset_monitoring_run and self._is_monitoring_run: + return + + if self._run and not self._is_monitoring_run and not self._force_reset_monitoring_run: + self._initialize_run(run=self._run_id) + self._is_monitoring_run = True + else: + self._initialize_run() + self._force_reset_monitoring_run = False + + def _ensure_at_least_run_without_monitoring(self): + if self._initial_run is not None: + self._use_initial_run() + else: + if not self._run: + self._initialize_run( + run=self._run_id, + capture_stdout=False, + capture_stderr=False, + capture_hardware_metrics=False, + capture_traceback=False, + ) + self._is_monitoring_run = False + + @property + def run(self): + if self._run is None: + self._ensure_at_least_run_without_monitoring() + return self._run + + @property + def _metadata_namespace(self): + return self.run[self._base_namespace_path] + + def _log_integration_version(self): + self.run[NeptuneCallback.integration_version_key] = version + + def _log_trainer_parameters(self, args): + self._metadata_namespace[NeptuneCallback.trainer_parameters_key] = args.to_sanitized_dict() + + def _log_model_parameters(self, model): + if model and hasattr(model, "config") and model.config is not None: + self._metadata_namespace[NeptuneCallback.model_parameters_key] = model.config.to_dict() + + def _log_hyper_param_search_parameters(self, state): + if state and hasattr(state, "trial_name"): + self._metadata_namespace[NeptuneCallback.trial_name_key] = state.trial_name + + if state and hasattr(state, "trial_params") and state.trial_params is not None: + self._metadata_namespace[NeptuneCallback.trial_params_key] = state.trial_params + + def _log_model_checkpoint(self, source_directory: str, checkpoint: str): + target_path = relative_path = os.path.join(source_directory, checkpoint) + + if self._volatile_checkpoints_dir is not None: + consistent_checkpoint_path = os.path.join(self._volatile_checkpoints_dir, checkpoint) + try: + shutil.copytree(relative_path, os.path.join(consistent_checkpoint_path, relative_path)) + target_path = consistent_checkpoint_path + except IOError as e: + logger.warning( + "NeptuneCallback was unable to made a copy of checkpoint due to I/O exception: '{}'." + "Could fail trying to upload.".format(e) + ) + + self._metadata_namespace[self._target_checkpoints_namespace].upload_files(target_path) + + if self._should_clean_recently_uploaded_checkpoint and self._recent_checkpoint_path is not None: + self._metadata_namespace[self._target_checkpoints_namespace].delete_files(self._recent_checkpoint_path) + + self._recent_checkpoint_path = relative_path + + def on_init_end(self, args, state, control, **kwargs): + self._volatile_checkpoints_dir = None + if self._log_checkpoints and (args.overwrite_output_dir or args.save_total_limit is not None): + self._volatile_checkpoints_dir = tempfile.TemporaryDirectory().name + + if self._log_checkpoints == "best" and not args.load_best_model_at_end: + raise ValueError("To save the best model checkpoint, the load_best_model_at_end argument must be enabled.") def on_train_begin(self, args, state, control, model=None, **kwargs): - if not self._initialized: - self.setup(args, state, model) + if not state.is_world_process_zero: + return - def on_log(self, args, state, control, logs, model=None, **kwargs): - if not self._initialized: - self.setup(args, state, model) - if state.is_world_process_zero: - for k, v in logs.items(): - self._neptune_run[k].log(v, step=state.global_step) + self._ensure_run_with_monitoring() + self._force_reset_monitoring_run = True + + self._log_integration_version() + if self._log_parameters: + self._log_trainer_parameters(args) + self._log_model_parameters(model) + + if state.is_hyper_param_search: + self._log_hyper_param_search_parameters(state) + + def on_train_end(self, args, state, control, **kwargs): + self._stop_run_if_exists() def __del__(self): - """ - Environment: - NEPTUNE_STOP_TIMEOUT (`int`, *optional*): - Number of seconsds to wait for all Neptune.ai tracking calls to finish, before stopping the tracked - run. If not set it will wait for all tracking calls to finish. - """ - try: - stop_timeout = os.getenv("NEPTUNE_STOP_TIMEOUT") - stop_timeout = int(stop_timeout) if stop_timeout else None - self._neptune_run.stop(seconds=stop_timeout) - except AttributeError: - pass + if self._volatile_checkpoints_dir is not None: + shutil.rmtree(self._volatile_checkpoints_dir, ignore_errors=True) + + self._stop_run_if_exists() + + def on_save(self, args, state, control, **kwargs): + if self._should_upload_checkpoint: + self._log_model_checkpoint(args.output_dir, f"checkpoint-{state.global_step}") + + def on_evaluate(self, args, state, control, metrics=None, **kwargs): + if self._log_checkpoints == "best": + best_metric_name = args.metric_for_best_model + if not best_metric_name.startswith("eval_"): + best_metric_name = f"eval_{best_metric_name}" + + metric_value = metrics.get(best_metric_name) + + operator = np.greater if args.greater_is_better else np.less + + self._should_upload_checkpoint = state.best_metric is None or operator(metric_value, state.best_metric) + + @classmethod + def get_run(cls, trainer): + for callback in trainer.callback_handler.callbacks: + if isinstance(callback, cls): + return callback.run + + raise Exception("The trainer doesn't have a NeptuneCallback configured.") + + def on_log(self, args, state, control, logs: Optional[Dict[str, float]] = None, **kwargs): + if not state.is_world_process_zero: + return + + if logs is not None: + for name, value in rewrite_logs(logs).items(): + if isinstance(value, (int, float)): + if name in NeptuneCallback.flat_metrics: + self._metadata_namespace[name] = value + else: + self._metadata_namespace[name].log(value, step=state.global_step) class CodeCarbonCallback(TrainerCallback): diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index 4cba5773420ff7..dd5e455bcfc4e7 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -414,8 +414,8 @@ class TrainingArguments: instance of `Dataset`. report_to (`str` or `List[str]`, *optional*, defaults to `"all"`): The list of integrations to report the results and logs to. Supported platforms are `"azure_ml"`, - `"comet_ml"`, `"mlflow"`, `"tensorboard"` and `"wandb"`. Use `"all"` to report to all integrations - installed, `"none"` for no integrations. + `"comet_ml"`, `"mlflow"`, `"neptune"`, `"tensorboard"` and `"wandb"`. Use `"all"` to report to all + integrations installed, `"none"` for no integrations. ddp_find_unused_parameters (`bool`, *optional*): When using distributed training, the value of the flag `find_unused_parameters` passed to `DistributedDataParallel`. Will default to `False` if gradient checkpointing is used, `True` otherwise. From f1a6df3210695fa7311b4e8905c520cb738decbb Mon Sep 17 00:00:00 2001 From: Ekagra Ranjan Date: Fri, 9 Sep 2022 21:14:56 +0530 Subject: [PATCH 249/539] Generate: Simplify is_pad_token_not_equal_to_eos_token_id (#18933) --- src/transformers/generation_tf_utils.py | 4 +--- src/transformers/generation_utils.py | 5 ++--- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/src/transformers/generation_tf_utils.py b/src/transformers/generation_tf_utils.py index d5f92b51e722ec..86149db0c16ef6 100644 --- a/src/transformers/generation_tf_utils.py +++ b/src/transformers/generation_tf_utils.py @@ -1739,9 +1739,7 @@ def _prepare_attention_mask_for_generation( ) -> tf.Tensor: is_input_ids = len(inputs.shape) == 2 and inputs.dtype in (tf.int32, tf.int64) is_pad_token_in_inputs = (pad_token_id is not None) and tf.math.reduce_any(inputs == pad_token_id) - is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or ( - (eos_token_id is not None) and (pad_token_id != eos_token_id) - ) + is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or (pad_token_id != eos_token_id) # Check if input is input_ids and padded -> only then is attention_mask defined if is_input_ids and is_pad_token_in_inputs and is_pad_token_not_equal_to_eos_token_id: diff --git a/src/transformers/generation_utils.py b/src/transformers/generation_utils.py index dcbe6e5946d24f..17de17f79e1ce9 100644 --- a/src/transformers/generation_utils.py +++ b/src/transformers/generation_utils.py @@ -495,9 +495,8 @@ def _prepare_attention_mask_for_generation( ) -> torch.LongTensor: is_input_ids = len(inputs.shape) == 2 and inputs.dtype in [torch.int, torch.long] is_pad_token_in_inputs = (pad_token_id is not None) and (pad_token_id in inputs) - is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or ( - (eos_token_id is not None) and (pad_token_id != eos_token_id) - ) + is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or (pad_token_id != eos_token_id) + # Check if input is input_ids and padded -> only then is attention_mask defined if is_input_ids and is_pad_token_in_inputs and is_pad_token_not_equal_to_eos_token_id: return inputs.ne(pad_token_id).long() From 660e0b97bd652bd3a0dfd5f847e5cf62502d0469 Mon Sep 17 00:00:00 2001 From: Matt Date: Fri, 9 Sep 2022 20:01:02 +0100 Subject: [PATCH 250/539] Fix train_step, test_step and tests for CLIP (#18684) * Fix train_step and test_step, correctly enable CLIP fit test * Stop using get_args on older Python versions * Don't use get_origin either * UnionType is actually even newer, don't use that either * Apply the same fix to test_loss_computation * Just realized I was accidentally skipping a bunch of tests! * Fix test_loss_computation for models without separable labels * Fix scalar losses in test_step and train_step * Stop committing your breakpoints * Fix Swin loss shape * Fix Tapas loss shape * Shape fixes for TAPAS, DeIT, HuBERT and ViTMAE * Add loss computation to TFMobileBertForPreTraining * make fixup and move copied from statement * make fixup and move copied from statement * Correct copied from * Add labels and next_sentence_label inputs to TFMobileBERT * Make sure total_loss is always defined * Update tests/test_modeling_tf_common.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * Fix copied from * Ensure CTC models get labels in tests * Ensure CTC models get labels in tests * Fix tests for vit_mae * Fix tests for vit_mae * Fix tests for vit_mae * Reduce batch size for wav2vec2 testing because it was causing OOM * Skip some TAPAS tests that are failing * Skip a failing HuBERT test * make style * Fix mobilebertforpretraining test * Skip Wav2Vec2 tests that use huge amounts of mem * Skip keras_fit for Wav2Vec2 as well Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- src/transformers/modeling_tf_utils.py | 10 +- .../models/clip/modeling_tf_clip.py | 1 + .../models/deit/modeling_tf_deit.py | 1 + .../models/hubert/modeling_tf_hubert.py | 2 + .../mobilebert/modeling_tf_mobilebert.py | 45 ++- .../models/swin/modeling_tf_swin.py | 1 + .../models/tapas/modeling_tf_tapas.py | 2 +- .../models/vit_mae/modeling_tf_vit_mae.py | 1 + .../models/hubert/test_modeling_tf_hubert.py | 8 + .../mobilebert/test_modeling_tf_mobilebert.py | 12 + tests/models/tapas/test_modeling_tf_tapas.py | 14 +- .../wav2vec2/test_modeling_tf_wav2vec2.py | 18 +- tests/test_modeling_tf_common.py | 379 ++++++++++-------- 13 files changed, 313 insertions(+), 181 deletions(-) diff --git a/src/transformers/modeling_tf_utils.py b/src/transformers/modeling_tf_utils.py index 484417f7ad33c5..3459b3027e7e64 100644 --- a/src/transformers/modeling_tf_utils.py +++ b/src/transformers/modeling_tf_utils.py @@ -1389,7 +1389,10 @@ def train_step(self, data): # Run forward pass. with tf.GradientTape() as tape: - y_pred = self(x, training=True) + if self._using_dummy_loss and "return_loss" in arg_names: + y_pred = self(x, training=True, return_loss=True) + else: + y_pred = self(x, training=True) if self._using_dummy_loss: loss = self.compiled_loss(y_pred.loss, y_pred.loss, sample_weight, regularization_losses=self.losses) else: @@ -1492,7 +1495,10 @@ def test_step(self, data): y = {label_to_output.get(key, key): val for key, val in y.items()} # Run forward pass. - y_pred = self(x, training=False) + if self._using_dummy_loss and "return_loss" in arg_names: + y_pred = self(x, return_loss=True, training=False) + else: + y_pred = self(x, training=False) if self._using_dummy_loss: loss = self.compiled_loss(y_pred.loss, y_pred.loss, sample_weight, regularization_losses=self.losses) else: diff --git a/src/transformers/models/clip/modeling_tf_clip.py b/src/transformers/models/clip/modeling_tf_clip.py index 8635c7d7602ee9..d302f9c7c16b1b 100644 --- a/src/transformers/models/clip/modeling_tf_clip.py +++ b/src/transformers/models/clip/modeling_tf_clip.py @@ -874,6 +874,7 @@ def call( loss = None if return_loss: loss = clip_loss(logits_per_text) + loss = tf.reshape(loss, (1,)) if not return_dict: output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) diff --git a/src/transformers/models/deit/modeling_tf_deit.py b/src/transformers/models/deit/modeling_tf_deit.py index 918a7fc03531f5..ac1cc13e96470e 100644 --- a/src/transformers/models/deit/modeling_tf_deit.py +++ b/src/transformers/models/deit/modeling_tf_deit.py @@ -852,6 +852,7 @@ def call( total_loss = tf.reduce_sum(reconstruction_loss * mask) num_masked_pixels = (tf.reduce_sum(mask) + 1e-5) * self.config.num_channels masked_im_loss = total_loss / num_masked_pixels + masked_im_loss = tf.reshape(masked_im_loss, (1,)) if not return_dict: output = (reconstructed_pixel_values,) + outputs[1:] diff --git a/src/transformers/models/hubert/modeling_tf_hubert.py b/src/transformers/models/hubert/modeling_tf_hubert.py index f078b5d0cfc7dd..c33eb504594190 100644 --- a/src/transformers/models/hubert/modeling_tf_hubert.py +++ b/src/transformers/models/hubert/modeling_tf_hubert.py @@ -1677,8 +1677,10 @@ def call( if self.config.ctc_loss_reduction == "sum": loss = tf.reduce_sum(loss) + loss = tf.reshape(loss, (1,)) if self.config.ctc_loss_reduction == "mean": loss = tf.reduce_mean(loss) + loss = tf.reshape(loss, (1,)) else: loss = None diff --git a/src/transformers/models/mobilebert/modeling_tf_mobilebert.py b/src/transformers/models/mobilebert/modeling_tf_mobilebert.py index ee3e139c16170c..3a17f20202d84c 100644 --- a/src/transformers/models/mobilebert/modeling_tf_mobilebert.py +++ b/src/transformers/models/mobilebert/modeling_tf_mobilebert.py @@ -88,6 +88,37 @@ ] +# Copied from transformers.models.bert.modeling_tf_bert.TFBertPreTrainingLoss +class TFMobileBertPreTrainingLoss: + """ + Loss function suitable for BERT-like pretraining, that is, the task of pretraining a language model by combining + NSP + MLM. .. note:: Any label of -100 will be ignored (along with the corresponding logits) in the loss + computation. + """ + + def hf_compute_loss(self, labels: tf.Tensor, logits: tf.Tensor) -> tf.Tensor: + loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( + from_logits=True, reduction=tf.keras.losses.Reduction.NONE + ) + + # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway + unmasked_lm_losses = loss_fn(y_true=tf.nn.relu(labels["labels"]), y_pred=logits[0]) + # make sure only labels that are not equal to -100 + # are taken into account for the loss computation + lm_loss_mask = tf.cast(labels["labels"] != -100, dtype=unmasked_lm_losses.dtype) + masked_lm_losses = unmasked_lm_losses * lm_loss_mask + reduced_masked_lm_loss = tf.reduce_sum(masked_lm_losses) / tf.reduce_sum(lm_loss_mask) + + # Clip negative labels to zero here to avoid NaNs and errors - those positions will get masked later anyway + unmasked_ns_loss = loss_fn(y_true=tf.nn.relu(labels["next_sentence_label"]), y_pred=logits[1]) + ns_loss_mask = tf.cast(labels["next_sentence_label"] != -100, dtype=unmasked_ns_loss.dtype) + masked_ns_loss = unmasked_ns_loss * ns_loss_mask + + reduced_masked_ns_loss = tf.reduce_sum(masked_ns_loss) / tf.reduce_sum(ns_loss_mask) + + return tf.reshape(reduced_masked_lm_loss + reduced_masked_ns_loss, (1,)) + + class TFMobileBertIntermediate(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) @@ -981,7 +1012,7 @@ def serving_output(self, output: TFBaseModelOutputWithPooling) -> TFBaseModelOut """, MOBILEBERT_START_DOCSTRING, ) -class TFMobileBertForPreTraining(TFMobileBertPreTrainedModel): +class TFMobileBertForPreTraining(TFMobileBertPreTrainedModel, TFMobileBertPreTrainingLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert") @@ -1009,6 +1040,8 @@ def call( output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, + labels: Optional[Union[np.ndarray, tf.Tensor]] = None, + next_sentence_label: Optional[Union[np.ndarray, tf.Tensor]] = None, training: Optional[bool] = False, ) -> Union[Tuple, TFMobileBertForPreTrainingOutput]: r""" @@ -1043,10 +1076,18 @@ def call( prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) + total_loss = None + if labels is not None and next_sentence_label is not None: + d_labels = {"labels": labels} + d_labels["next_sentence_label"] = next_sentence_label + total_loss = self.hf_compute_loss(labels=d_labels, logits=(prediction_scores, seq_relationship_score)) + if not return_dict: - return (prediction_scores, seq_relationship_score) + outputs[2:] + output = (prediction_scores, seq_relationship_score) + outputs[2:] + return ((total_loss,) + output) if total_loss is not None else output return TFMobileBertForPreTrainingOutput( + loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, diff --git a/src/transformers/models/swin/modeling_tf_swin.py b/src/transformers/models/swin/modeling_tf_swin.py index 2f9bd27b0e0006..fdaefc0a3b25b0 100644 --- a/src/transformers/models/swin/modeling_tf_swin.py +++ b/src/transformers/models/swin/modeling_tf_swin.py @@ -1382,6 +1382,7 @@ def call( total_loss = tf.reduce_sum(reconstruction_loss * mask) num_masked_pixels = (tf.reduce_sum(mask) + 1e-5) * self.config.num_channels masked_im_loss = total_loss / num_masked_pixels + masked_im_loss = tf.reshape(masked_im_loss, (1,)) if not return_dict: output = (reconstructed_pixel_values,) + outputs[2:] diff --git a/src/transformers/models/tapas/modeling_tf_tapas.py b/src/transformers/models/tapas/modeling_tf_tapas.py index 93d98914f1f344..0e7539546d49dc 100644 --- a/src/transformers/models/tapas/modeling_tf_tapas.py +++ b/src/transformers/models/tapas/modeling_tf_tapas.py @@ -1431,7 +1431,7 @@ def call( logits_aggregation = self.aggregation_classifier(pooled_output) # Total loss calculation - total_loss = 0.0 + total_loss = tf.zeros(shape=(1,), dtype=tf.float32) calculate_loss = False if labels is not None: calculate_loss = True diff --git a/src/transformers/models/vit_mae/modeling_tf_vit_mae.py b/src/transformers/models/vit_mae/modeling_tf_vit_mae.py index d43bfa45b1fb94..a5bf778c4c830f 100644 --- a/src/transformers/models/vit_mae/modeling_tf_vit_mae.py +++ b/src/transformers/models/vit_mae/modeling_tf_vit_mae.py @@ -1085,6 +1085,7 @@ def forward_loss(self, pixel_values, pred, mask): loss = tf.reduce_mean(loss, axis=-1) # [batch_size, num_patches], mean loss per patch loss = tf.reduce_sum(loss * mask) / tf.reduce_sum(mask) # mean loss on removed patches + loss = tf.reshape(loss, (1,)) return loss @unpack_inputs diff --git a/tests/models/hubert/test_modeling_tf_hubert.py b/tests/models/hubert/test_modeling_tf_hubert.py index 871d466d97129b..d37679831d0f8e 100644 --- a/tests/models/hubert/test_modeling_tf_hubert.py +++ b/tests/models/hubert/test_modeling_tf_hubert.py @@ -325,6 +325,10 @@ def test_model_from_pretrained(self): model = TFHubertModel.from_pretrained("facebook/hubert-base-ls960") self.assertIsNotNone(model) + @unittest.skip("Loss shapes for CTC don't match the base test.") + def test_loss_computation(self): + pass + @require_tf class TFHubertRobustModelTest(TFModelTesterMixin, unittest.TestCase): @@ -443,6 +447,10 @@ def test_model_from_pretrained(self): model = TFHubertModel.from_pretrained("facebook/hubert-large-ls960-ft") self.assertIsNotNone(model) + @unittest.skip("Loss shapes for CTC don't match the base test.") + def test_loss_computation(self): + pass + @require_tf class TFHubertUtilsTest(unittest.TestCase): diff --git a/tests/models/mobilebert/test_modeling_tf_mobilebert.py b/tests/models/mobilebert/test_modeling_tf_mobilebert.py index 1800cd3ca1432d..75334e2945091e 100644 --- a/tests/models/mobilebert/test_modeling_tf_mobilebert.py +++ b/tests/models/mobilebert/test_modeling_tf_mobilebert.py @@ -17,6 +17,7 @@ import unittest from transformers import MobileBertConfig, is_tf_available +from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow, tooslow from ...test_configuration_common import ConfigTester @@ -27,6 +28,7 @@ import tensorflow as tf from transformers import ( + TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, @@ -58,6 +60,16 @@ class TFMobileBertModelTest(TFModelTesterMixin, unittest.TestCase): test_head_masking = False test_onnx = False + # special case for ForPreTraining model, same as BERT tests + def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): + inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) + + if return_labels: + if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING): + inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) + + return inputs_dict + class TFMobileBertModelTester(object): def __init__( self, diff --git a/tests/models/tapas/test_modeling_tf_tapas.py b/tests/models/tapas/test_modeling_tf_tapas.py index bf5e8be370c775..2f49b57445baaf 100644 --- a/tests/models/tapas/test_modeling_tf_tapas.py +++ b/tests/models/tapas/test_modeling_tf_tapas.py @@ -362,7 +362,7 @@ def create_and_check_for_question_answering( "labels": labels, } result = model(inputs) - self.parent.assertEqual(result.loss.shape, ()) + self.parent.assertEqual(result.loss.shape, (1,)) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) # case 2: weak supervision for aggregation (WTQ) @@ -377,7 +377,7 @@ def create_and_check_for_question_answering( "float_answer": float_answer, } result = model(inputs) - self.parent.assertEqual(result.loss.shape, ()) + self.parent.assertEqual(result.loss.shape, (1,)) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels)) @@ -393,7 +393,7 @@ def create_and_check_for_question_answering( "aggregation_labels": aggregation_labels, } result = model(inputs) - self.parent.assertEqual(result.loss.shape, ()) + self.parent.assertEqual(result.loss.shape, (1,)) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.logits_aggregation.shape, (self.batch_size, self.num_aggregation_labels)) @@ -502,6 +502,14 @@ def test_for_sequence_classification(self): def test_dataset_conversion(self): pass + @unittest.skip(reason="The default test gets NaN losses with the test-generated inputs") + def test_keras_fit(self): + pass + + @unittest.skip(reason="The default test gets NaN losses with the test-generated inputs") + def test_loss_computation(self): + pass + def prepare_tapas_single_inputs_for_inference(): # Here we prepare a single table-question pair to test TAPAS inference on: diff --git a/tests/models/wav2vec2/test_modeling_tf_wav2vec2.py b/tests/models/wav2vec2/test_modeling_tf_wav2vec2.py index 323f44ba99fb4f..3418a5a76b07fc 100644 --- a/tests/models/wav2vec2/test_modeling_tf_wav2vec2.py +++ b/tests/models/wav2vec2/test_modeling_tf_wav2vec2.py @@ -53,7 +53,7 @@ class TFWav2Vec2ModelTester: def __init__( self, parent, - batch_size=13, + batch_size=3, seq_length=1024, is_training=False, hidden_size=16, @@ -337,6 +337,14 @@ def test_model_from_pretrained(self): model = TFWav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h") self.assertIsNotNone(model) + @unittest.skip(reason="Dataset conversion goes OOM and crashes with the default options!") + def test_dataset_conversion(self): + pass + + @unittest.skip(reason="Training goes OOM and crashes with the default options!") + def test_keras_fit(self): + pass + @require_tf class TFWav2Vec2RobustModelTest(TFModelTesterMixin, unittest.TestCase): @@ -455,6 +463,14 @@ def test_model_from_pretrained(self): model = TFWav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h") self.assertIsNotNone(model) + @unittest.skip(reason="Dataset conversion goes OOM and crashes with the default options!") + def test_dataset_conversion(self): + pass + + @unittest.skip(reason="Training goes OOM and crashes with the default options!") + def test_keras_fit(self): + pass + @require_tf class TFWav2Vec2UtilsTest(unittest.TestCase): diff --git a/tests/test_modeling_tf_common.py b/tests/test_modeling_tf_common.py index 0ef457c03523eb..e1b21788e2475c 100644 --- a/tests/test_modeling_tf_common.py +++ b/tests/test_modeling_tf_common.py @@ -22,9 +22,10 @@ import tempfile import unittest import unittest.mock as mock +from dataclasses import fields from importlib import import_module from math import isnan -from typing import List, Tuple +from typing import List, Tuple, get_type_hints from datasets import Dataset @@ -124,6 +125,26 @@ def _config_zero_init(config): return configs_no_init +def _return_type_has_loss(model): + return_type = get_type_hints(model.call) + if "return" not in return_type: + return False + return_type = return_type["return"] + if hasattr(return_type, "__args__"): # Awkward check for union because UnionType only turns up in 3.10 + for type_annotation in return_type.__args__: + if inspect.isclass(type_annotation) and issubclass(type_annotation, ModelOutput): + field_names = [field.name for field in fields(type_annotation)] + if "loss" in field_names: + return True + return False + elif isinstance(return_type, tuple): + return False + elif isinstance(return_type, ModelOutput): + class_fields = fields(return_type) + return "loss" in class_fields + return False + + @require_tf class TFModelTesterMixin: @@ -170,7 +191,7 @@ def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> d *get_values(TF_MODEL_FOR_PRETRAINING_MAPPING), *get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING), *get_values(TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING), - ]: + ] and "labels" in dict(inspect.signature(model_class.call).parameters): inputs_dict["labels"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32 ) @@ -182,6 +203,11 @@ def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> d elif model_class in get_values(TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING): batch_size, num_channels, height, width = inputs_dict["pixel_values"].shape inputs_dict["labels"] = tf.zeros((self.model_tester.batch_size, height, width), dtype=tf.int32) + elif model_class.__name__.endswith("ForCTC"): + # When we have enough CTC models for an AutoClass, we should use their mapping instead of name checks + inputs_dict["labels"] = tf.zeros( + (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32 + ) return inputs_dict @@ -1335,72 +1361,74 @@ def test_loss_computation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) - if getattr(model, "hf_compute_loss", None): - # The number of elements in the loss should be the same as the number of elements in the label - prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) - added_label = prepared_for_class[ - sorted(list(prepared_for_class.keys() - inputs_dict.keys()), reverse=True)[0] - ] - expected_loss_size = added_label.shape.as_list()[:1] - - # Test that model correctly compute the loss with kwargs - prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) - possible_input_names = {"input_ids", "pixel_values", "input_features"} - input_name = possible_input_names.intersection(set(prepared_for_class)).pop() - model_input = prepared_for_class.pop(input_name) - - loss = model(model_input, **prepared_for_class)[0] - self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) - - # Test that model correctly compute the loss when we mask some positions - prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) - possible_input_names = {"input_ids", "pixel_values", "input_features"} - input_name = possible_input_names.intersection(set(prepared_for_class)).pop() - model_input = prepared_for_class.pop(input_name) - if "labels" in prepared_for_class: - labels = prepared_for_class["labels"].numpy() - if len(labels.shape) > 1 and labels.shape[1] != 1: - labels[0] = -100 - prepared_for_class["labels"] = tf.convert_to_tensor(labels) - loss = model(model_input, **prepared_for_class)[0] - self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) - self.assertTrue(not np.any(np.isnan(loss.numpy()))) - - # Test that model correctly compute the loss with a dict - prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) - loss = model(prepared_for_class)[0] - self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) - - # Test that model correctly compute the loss with a tuple - prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) - - # Get keys that were added with the _prepare_for_class function - label_keys = prepared_for_class.keys() - inputs_dict.keys() - signature = inspect.signature(model.call).parameters - signature_names = list(signature.keys()) - - # Create a dictionary holding the location of the tensors in the tuple - tuple_index_mapping = {0: input_name} - for label_key in label_keys: - label_key_index = signature_names.index(label_key) - tuple_index_mapping[label_key_index] = label_key - sorted_tuple_index_mapping = sorted(tuple_index_mapping.items()) - # Initialize a list with their default values, update the values and convert to a tuple - list_input = [] - - for name in signature_names: - if name != "kwargs": - list_input.append(signature[name].default) - - for index, value in sorted_tuple_index_mapping: - list_input[index] = prepared_for_class[value] - - tuple_input = tuple(list_input) - - # Send to model - loss = model(tuple_input[:-1])[0] - - self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) + if not getattr(model, "hf_compute_loss", None) and not _return_type_has_loss(model): + continue + # The number of elements in the loss should be the same as the number of elements in the label + prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) + added_label_names = sorted(list(prepared_for_class.keys() - inputs_dict.keys()), reverse=True) + if not added_label_names: + continue # This test is only for models with easily-separable labels + added_label = prepared_for_class[added_label_names[0]] + expected_loss_size = added_label.shape.as_list()[:1] + + # Test that model correctly compute the loss with kwargs + prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) + possible_input_names = {"input_ids", "pixel_values", "input_features", "input_values"} + input_name = possible_input_names.intersection(set(prepared_for_class)).pop() + model_input = prepared_for_class.pop(input_name) + + loss = model(model_input, **prepared_for_class)[0] + self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) + + # Test that model correctly compute the loss when we mask some positions + prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) + possible_input_names = {"input_ids", "pixel_values", "input_features", "input_values"} + input_name = possible_input_names.intersection(set(prepared_for_class)).pop() + model_input = prepared_for_class.pop(input_name) + if "labels" in prepared_for_class: + labels = prepared_for_class["labels"].numpy() + if len(labels.shape) > 1 and labels.shape[1] != 1: + labels[0] = -100 + prepared_for_class["labels"] = tf.convert_to_tensor(labels) + loss = model(model_input, **prepared_for_class)[0] + self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) + self.assertTrue(not np.any(np.isnan(loss.numpy()))) + + # Test that model correctly compute the loss with a dict + prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) + loss = model(prepared_for_class)[0] + self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) + + # Test that model correctly compute the loss with a tuple + prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) + + # Get keys that were added with the _prepare_for_class function + label_keys = prepared_for_class.keys() - inputs_dict.keys() + signature = inspect.signature(model.call).parameters + signature_names = list(signature.keys()) + + # Create a dictionary holding the location of the tensors in the tuple + tuple_index_mapping = {0: input_name} + for label_key in label_keys: + label_key_index = signature_names.index(label_key) + tuple_index_mapping[label_key_index] = label_key + sorted_tuple_index_mapping = sorted(tuple_index_mapping.items()) + # Initialize a list with their default values, update the values and convert to a tuple + list_input = [] + + for name in signature_names: + if name != "kwargs": + list_input.append(signature[name].default) + + for index, value in sorted_tuple_index_mapping: + list_input[index] = prepared_for_class[value] + + tuple_input = tuple(list_input) + + # Send to model + loss = model(tuple_input[:-1])[0] + + self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1]) def check_keras_fit_results(self, val_loss1, val_loss2, atol=1e-2, rtol=1e-3): self.assertTrue(np.allclose(val_loss1, val_loss2, atol=atol, rtol=rtol)) @@ -1409,111 +1437,118 @@ def test_keras_fit(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) - if getattr(model, "hf_compute_loss", None): - # Test that model correctly compute the loss with kwargs - prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) - # Is there a better way to remove these decoder inputs? - prepared_for_class = { - key: val - for key, val in prepared_for_class.items() - if key not in ("head_mask", "decoder_head_mask", "cross_attn_head_mask", "decoder_input_ids") - } + if not getattr(model, "hf_compute_loss", False) and not _return_type_has_loss(model): + continue + # Test that model correctly compute the loss with kwargs + prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) + # Is there a better way to remove these decoder inputs? + # We also remove "return_loss" as this is covered by the train_step when using fit() + prepared_for_class = { + key: val + for key, val in prepared_for_class.items() + if key + not in ("head_mask", "decoder_head_mask", "cross_attn_head_mask", "decoder_input_ids", "return_loss") + } - possible_label_cols = { - "labels", - "label", - "label_ids", - "start_positions", - "start_position", - "end_positions", - "end_position", - "next_sentence_label", - } - label_names = possible_label_cols.intersection(set(prepared_for_class)) - self.assertGreater(len(label_names), 0, msg="No matching label names found!") - labels = {key: val for key, val in prepared_for_class.items() if key in label_names} - inputs_minus_labels = {key: val for key, val in prepared_for_class.items() if key not in label_names} - self.assertGreater(len(inputs_minus_labels), 0) - accuracy_classes = [ - "ForPreTraining", - "ForCausalLM", - "ForMaskedLM", - "ForQuestionAnswering", - "ForMultipleChoice", - "ForSequenceClassification", - "ForTokenClassification", - "ForNextSentencePrediction", - "LMHeadModel", - ] - for accuracy_class in accuracy_classes: - if model.__class__.__name__.endswith(accuracy_class): - metrics = [tf.keras.metrics.SparseCategoricalAccuracy()] - break - else: - metrics = [] - - model(model.dummy_inputs) # Build the model so we can get some constant weights - model_weights = model.get_weights() - - # Run eagerly to save some expensive compilation times - model.compile(optimizer=tf.keras.optimizers.SGD(0.0), run_eagerly=True, metrics=metrics) - # Make sure the model fits without crashing regardless of where we pass the labels - history1 = model.fit( - prepared_for_class, - validation_data=prepared_for_class, - steps_per_epoch=1, - validation_steps=1, - shuffle=False, - ) - val_loss1 = history1.history["val_loss"][0] - self.assertTrue(not isnan(val_loss1)) - accuracy1 = {key: val[0] for key, val in history1.history.items() if key.endswith("accuracy")} - - # We reinitialize the model here even though our learning rate was zero - # because BatchNorm updates weights by means other than gradient descent. - model.set_weights(model_weights) - - history2 = model.fit( - inputs_minus_labels, - labels, - validation_data=(inputs_minus_labels, labels), - steps_per_epoch=1, - validation_steps=1, - shuffle=False, - ) - val_loss2 = history2.history["val_loss"][0] - self.assertTrue(not isnan(val_loss2)) - accuracy2 = {key: val[0] for key, val in history2.history.items() if key.endswith("accuracy")} - self.check_keras_fit_results(val_loss1, val_loss2) - self.assertEqual(history1.history.keys(), history2.history.keys()) - for key in history1.history.keys(): - if not key.startswith("val_"): - self.assertTrue("val_" + key in history1.history.keys(), "Outputs differ in train/test step!") - if metrics: - self.assertTrue(len(accuracy1) == len(accuracy2) > 0, "Missing metrics!") - - # Make sure fit works with tf.data.Dataset and results are consistent - dataset = tf.data.Dataset.from_tensor_slices(prepared_for_class) - # Pass in all samples as a batch to match other `fit` calls - dataset = dataset.batch(len(dataset)) - - # Reinitialize to fix batchnorm again - model.set_weights(model_weights) - - history3 = model.fit( - dataset, - validation_data=dataset, - steps_per_epoch=1, - validation_steps=1, - shuffle=False, - ) - val_loss3 = history3.history["val_loss"][0] - self.assertTrue(not isnan(val_loss3)) - accuracy3 = {key: val[0] for key, val in history3.history.items() if key.endswith("accuracy")} - self.check_keras_fit_results(val_loss1, val_loss3) - self.assertEqual(history1.history.keys(), history3.history.keys()) - if metrics: - self.assertTrue(len(accuracy1) == len(accuracy3) > 0, "Missing metrics!") + accuracy_classes = [ + "ForPreTraining", + "ForCausalLM", + "ForMaskedLM", + "ForQuestionAnswering", + "ForMultipleChoice", + "ForSequenceClassification", + "ForTokenClassification", + "ForNextSentencePrediction", + "LMHeadModel", + ] + for accuracy_class in accuracy_classes: + if model.__class__.__name__.endswith(accuracy_class): + metrics = [tf.keras.metrics.SparseCategoricalAccuracy()] + break + else: + metrics = [] + + model(model.dummy_inputs) # Build the model so we can get some constant weights + model_weights = model.get_weights() + + # Run eagerly to save some expensive compilation times + model.compile(optimizer=tf.keras.optimizers.SGD(0.0), run_eagerly=True, metrics=metrics) + # Make sure the model fits without crashing regardless of where we pass the labels + history1 = model.fit( + prepared_for_class, + validation_data=prepared_for_class, + steps_per_epoch=1, + validation_steps=1, + shuffle=False, + ) + val_loss1 = history1.history["val_loss"][0] + self.assertTrue(not isnan(val_loss1)) + accuracy1 = {key: val[0] for key, val in history1.history.items() if key.endswith("accuracy")} + + possible_label_cols = { + "labels", + "label", + "label_ids", + "start_positions", + "start_position", + "end_positions", + "end_position", + "next_sentence_label", + } + label_names = possible_label_cols.intersection(set(prepared_for_class)) + if len(label_names) == 0: + # The next tests only make sense for models with separate inputs and labels, and do not make + # sense for models that don't clearly distinguish between the two (e.g. CLIP) + return + labels = {key: val for key, val in prepared_for_class.items() if key in label_names} + inputs_minus_labels = {key: val for key, val in prepared_for_class.items() if key not in label_names} + self.assertGreater(len(inputs_minus_labels), 0) + + # We reinitialize the model here even though our learning rate was zero + # because BatchNorm updates weights by means other than gradient descent. + model.set_weights(model_weights) + + history2 = model.fit( + inputs_minus_labels, + labels, + validation_data=(inputs_minus_labels, labels), + steps_per_epoch=1, + validation_steps=1, + shuffle=False, + ) + val_loss2 = history2.history["val_loss"][0] + self.assertTrue(not isnan(val_loss2)) + accuracy2 = {key: val[0] for key, val in history2.history.items() if key.endswith("accuracy")} + self.check_keras_fit_results(val_loss1, val_loss2) + self.assertEqual(history1.history.keys(), history2.history.keys()) + for key in history1.history.keys(): + if not key.startswith("val_"): + self.assertTrue("val_" + key in history1.history.keys(), "Outputs differ in train/test step!") + if metrics: + self.assertTrue(len(accuracy1) == len(accuracy2) > 0, "Missing metrics!") + + # Make sure fit works with tf.data.Dataset and results are consistent + dataset = tf.data.Dataset.from_tensor_slices(prepared_for_class) + # Pass in all samples as a batch to match other `fit` calls + dataset = dataset.batch(len(dataset)) + + # Reinitialize to fix batchnorm again + model.set_weights(model_weights) + + history3 = model.fit( + dataset, + validation_data=dataset, + steps_per_epoch=1, + validation_steps=1, + shuffle=False, + ) + val_loss3 = history3.history["val_loss"][0] + self.assertTrue(not isnan(val_loss3)) + accuracy3 = {key: val[0] for key, val in history3.history.items() if key.endswith("accuracy")} + self.check_keras_fit_results(val_loss1, val_loss3) + self.assertEqual(history1.history.keys(), history3.history.keys()) + if metrics: + self.assertTrue(len(accuracy1) == len(accuracy3) > 0, "Missing metrics!") def test_int64_inputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() From 645f17428606bdec214b6fbd60b3ffc5d1a3ab9a Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Fri, 9 Sep 2022 15:07:09 -0400 Subject: [PATCH 251/539] Exit early in load if no weights are in the sharded state dict (#18937) --- src/transformers/modeling_utils.py | 35 ++++++++++++++++-------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index 3a3eb3edc14d32..68fe7f94d2c7f4 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -418,22 +418,25 @@ def _load_state_dict_into_model(model_to_load, state_dict, start_prefix): def load(module: nn.Module, state_dict, prefix=""): local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {}) args = (state_dict, prefix, local_metadata, True, [], [], error_msgs) - if is_deepspeed_zero3_enabled(): - import deepspeed - - # In sharded models, each shard has only part of the full state_dict, so only gather - # parameters that are in the current state_dict. - named_parameters = dict(module.named_parameters(prefix=prefix[:-1], recurse=False)) - params_to_gather = [named_parameters[k] for k in state_dict.keys() if k in named_parameters] - if len(params_to_gather) > 0: - # because zero3 puts placeholders in model params, this context - # manager gathers (unpartitions) the params of the current layer, then loads from - # the state dict and then re-partitions them again - with deepspeed.zero.GatheredParameters(params_to_gather, modifier_rank=0): - if torch.distributed.get_rank() == 0: - module._load_from_state_dict(*args) - else: - module._load_from_state_dict(*args) + # Parameters of module and children will start with prefix. We can exit early if there are none in this + # state_dict + if len([key for key in state_dict if key.startswith(prefix)]) > 0: + if is_deepspeed_zero3_enabled(): + import deepspeed + + # In sharded models, each shard has only part of the full state_dict, so only gather + # parameters that are in the current state_dict. + named_parameters = dict(module.named_parameters(prefix=prefix[:-1], recurse=False)) + params_to_gather = [named_parameters[k] for k in state_dict.keys() if k in named_parameters] + if len(params_to_gather) > 0: + # because zero3 puts placeholders in model params, this context + # manager gathers (unpartitions) the params of the current layer, then loads from + # the state dict and then re-partitions them again + with deepspeed.zero.GatheredParameters(params_to_gather, modifier_rank=0): + if torch.distributed.get_rank() == 0: + module._load_from_state_dict(*args) + else: + module._load_from_state_dict(*args) for name, child in module._modules.items(): if child is not None: From 855dcae8bb743c3f8f0781742d7fa2fa3aaa3e22 Mon Sep 17 00:00:00 2001 From: Bram Vanroy Date: Fri, 9 Sep 2022 23:30:05 +0200 Subject: [PATCH 252/539] update black target version (#18955) * update black target version * add comment as per https://github.com/huggingface/transformers/pull/18955#issuecomment-1242081649 * revert change Will only update to 3.7 after black 2023 upgrade in January --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 8f101357e8ce8f..3aa0a86a4f607e 100644 --- a/setup.py +++ b/setup.py @@ -98,7 +98,7 @@ _deps = [ "Pillow", "accelerate>=0.10.0", - "black==22.3", + "black==22.3", # after updating to black 2023, also update Python version in pyproject.toml to 3.7 "codecarbon==1.2.0", "cookiecutter==1.7.3", "dataclasses", From 00cbadb870fb74b0eee4197fe9b62afbca457670 Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Sat, 10 Sep 2022 11:34:49 +0100 Subject: [PATCH 253/539] RFC: Replace custom TF embeddings by Keras embeddings (#18939) --- src/transformers/modeling_tf_utils.py | 107 +++++++++++++++++- .../models/bart/modeling_tf_bart.py | 74 ++++-------- .../models/mbart/modeling_tf_mbart.py | 3 +- tests/models/bart/test_modeling_tf_bart.py | 67 +---------- tests/test_modeling_tf_common.py | 32 ++---- 5 files changed, 141 insertions(+), 142 deletions(-) diff --git a/src/transformers/modeling_tf_utils.py b/src/transformers/modeling_tf_utils.py index 3459b3027e7e64..2c1febd43c8d20 100644 --- a/src/transformers/modeling_tf_utils.py +++ b/src/transformers/modeling_tf_utils.py @@ -887,6 +887,12 @@ def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, # If not, make the value to None saved_weight_value = saved_weights.get(symbolic_weight_name, None) + # Retrocompatibility patch: some embeddings are stored with the weights name (e.g. Bart's + # `model.shared/embeddings:0` are stored as `model.shared/weights:0`) + if saved_weight_value is None and symbolic_weight_name.endswith("embeddings:0"): + symbolic_weight_name = symbolic_weight_name[:-12] + "weight:0" + saved_weight_value = saved_weights.get(symbolic_weight_name, None) + # Add the updated name to the final list for computing missing/unexpected values symbolic_weights_names.add(symbolic_weight_name) @@ -1700,7 +1706,9 @@ def get_lm_head(self) -> tf.keras.layers.Layer: """ return None - def resize_token_embeddings(self, new_num_tokens=None) -> tf.Variable: + def resize_token_embeddings( + self, new_num_tokens: Optional[int] = None + ) -> Union[tf.keras.layers.Embedding, tf.Variable]: """ Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`. @@ -1710,11 +1718,17 @@ def resize_token_embeddings(self, new_num_tokens=None) -> tf.Variable: new_num_tokens (`int`, *optional*): The number of new tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just - returns a pointer to the input tokens `tf.Variable` module of the model without doing anything. + returns a pointer to the input tokens without doing anything. Return: - `tf.Variable`: Pointer to the input tokens Embeddings Module of the model. + `tf.Variable` or `tf.keras.layers.Embedding`: Pointer to the input tokens of the model. """ + # TODO (joao): flagged for replacement (by `_v2_resized_token_embeddings`) due to embeddings refactor + + # Run the new code path if the model has a keras embeddings layer + if isinstance(self.get_input_embeddings(), tf.keras.layers.Embedding): + return self._v2_resized_token_embeddings(new_num_tokens) + if new_num_tokens is None or new_num_tokens == self.config.vocab_size: return self._get_word_embedding_weight(self.get_input_embeddings()) @@ -1725,7 +1739,32 @@ def resize_token_embeddings(self, new_num_tokens=None) -> tf.Variable: return model_embeds + def _v2_resized_token_embeddings(self, new_num_tokens: Optional[int] = None) -> tf.keras.layers.Embedding: + """ + Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`. + + Arguments: + new_num_tokens (`int`, *optional*): + The number of new tokens in the embedding matrix. Increasing the size will add newly initialized + vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just + returns a pointer to the input tokens without doing anything. + + Return: + `tf.keras.layers.Embedding`: Pointer to the input tokens of the model. + """ + if new_num_tokens is None or new_num_tokens == self.config.vocab_size: + return self.get_input_embeddings() + + model_embeds = self._v2_resize_token_embeddings(new_num_tokens) + + # Update base model and current model config + self.config.vocab_size = new_num_tokens + + return model_embeds + def _get_word_embedding_weight(model, embedding_layer): + # TODO (joao): flagged for delection due to embeddings refactor + # If the variable holds the weights themselves, return them if isinstance(embedding_layer, tf.Tensor): return embedding_layer @@ -1755,6 +1794,7 @@ def _get_word_embedding_weight(model, embedding_layer): return None def _resize_token_embeddings(self, new_num_tokens): + # TODO (joao): flagged for replacement (by `_v2_resize_token_embeddings`) due to embeddings refactor old_embeddings = self._get_word_embedding_weight(self.get_input_embeddings()) new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens) @@ -1776,6 +1816,27 @@ def _resize_token_embeddings(self, new_num_tokens): return self.get_input_embeddings() + def _v2_resize_token_embeddings(self, new_num_tokens): + old_embeddings = self.get_input_embeddings() + new_embeddings = self._v2_get_resized_embeddings(old_embeddings, new_num_tokens) + self.set_input_embeddings(new_embeddings) + + # If word embeddings are not tied, make sure that lm head bias is resized as well + if self.get_bias() is not None: + old_lm_head_bias = self.get_bias() + new_lm_head_bias = self._get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens) + self.set_bias(new_lm_head_bias) + + # If word embeddings are not tied, make sure that lm head decoder is resized as well. + tied_weights = self.get_input_embeddings() == self.get_output_embeddings() + if self.get_output_embeddings() is not None and not tied_weights: + old_lm_head_decoder = self._get_word_embedding_weight(self.get_output_embeddings()) + # TODO (joao): this one probably needs a v2 version with other models + new_lm_head_decoder = self._get_resized_lm_head_decoder(old_lm_head_decoder, new_num_tokens) + self.set_output_embeddings(new_lm_head_decoder) + + return self.get_input_embeddings() + def _get_resized_lm_head_bias(self, old_lm_head_bias, new_num_tokens): """ Build a resized bias from the old ones. Increasing the size will add newly initialized vectors at the end. @@ -1885,6 +1946,7 @@ def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None) -> tf.Var `tf.Variable`: Pointer to the resized Embedding Module or the old Embedding Module if `new_num_tokens` is `None` """ + # TODO (joao): flagged for replacement (by `_v2_get_resized_embeddings`) due to embeddings refactor old_embedding_dim = shape_list(old_embeddings)[1] init_range = getattr(self.config, "initializer_range", 0.02) embeddings_mask, current_embeddings = init_copy_embeddings(old_embeddings, new_num_tokens) @@ -1900,6 +1962,42 @@ def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None) -> tf.Var return new_embeddings + def _v2_get_resized_embeddings( + self, old_embeddings: tf.keras.layers.Embedding, new_num_tokens: int + ) -> tf.keras.layers.Embedding: + """ + Build a resized Embedding layer from a provided Embedding layer. Increasing the size will add newly initialized + vectors at the end. Reducing the size will remove vectors from the end. + + Args: + old_embeddings (`tf.keras.layers.Embedding`): + Old embeddings to be resized. + new_num_tokens (`int`, *optional*): + New number of tokens in the embedding matrix. + + Return: + `tf.keras.layers.Embedding`: Resized Embedding layer. + """ + # Get a new (initialized) embeddings layer + init_range = getattr(self.config, "initializer_range", 0.02) + new_embeddings = tf.keras.layers.Embedding( + input_dim=new_num_tokens, + output_dim=old_embeddings.output_dim, + embeddings_initializer=get_initializer(init_range), + name=old_embeddings.embeddings.name[:-13], # exact same scoped name except "/embeddings:0" + ) + new_embeddings(tf.constant([[0]])) + + # Copy the old embeddings to the new embeddings + if old_embeddings.input_dim >= new_num_tokens: + init_embeddings = old_embeddings.embeddings[:new_num_tokens] + else: + init_embeddings = tf.concat( + [old_embeddings.embeddings, new_embeddings.embeddings[old_embeddings.input_dim :]], axis=0 + ) + new_embeddings.embeddings.assign(init_embeddings) + return new_embeddings + def prune_heads(self, heads_to_prune): """ Prunes heads of the base model. @@ -2632,6 +2730,7 @@ class TFSharedEmbeddings(tf.keras.layers.Layer): kwargs: Additional keyword arguments passed along to the `__init__` of `tf.keras.layers.Layer`. """ + # TODO (joao): flagged for delection due to embeddings refactor def __init__(self, vocab_size: int, hidden_size: int, initializer_range: Optional[float] = None, **kwargs): super().__init__(**kwargs) @@ -2848,6 +2947,8 @@ class TFWrappedEmbeddings: saving/storing the correct weights """ + # TODO (joao): flagged for delection due to embeddings refactor + def __init__(self, layer, abs_scope_name=None): self._layer = layer self._abs_scope_name = abs_scope_name diff --git a/src/transformers/models/bart/modeling_tf_bart.py b/src/transformers/models/bart/modeling_tf_bart.py index c15d0ae50451ae..17c0ce7a710502 100644 --- a/src/transformers/models/bart/modeling_tf_bart.py +++ b/src/transformers/models/bart/modeling_tf_bart.py @@ -35,8 +35,6 @@ TFCausalLanguageModelingLoss, TFModelInputType, TFPreTrainedModel, - TFSharedEmbeddings, - TFWrappedEmbeddings, keras_serializable, unpack_inputs, ) @@ -113,7 +111,7 @@ def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None): return (one_cst - expanded_mask) * LARGE_NEGATIVE -class TFBartLearnedPositionalEmbedding(TFSharedEmbeddings): +class TFBartLearnedPositionalEmbedding(tf.keras.layers.Embedding): """ This module learns positional embeddings up to a fixed maximum size. """ @@ -136,7 +134,8 @@ def call( position_ids = tf.range(seq_len, delta=1, name="range") position_ids += past_key_values_length - return super().call(position_ids + self.offset) + offset_dtype = position_ids.dtype if isinstance(position_ids, tf.Tensor) else tf.int32 + return super().call(position_ids + tf.constant(self.offset, dtype=offset_dtype)) class TFBartAttention(tf.keras.layers.Layer): @@ -667,7 +666,7 @@ class TFBartEncoder(tf.keras.layers.Layer): config: BartConfig """ - def __init__(self, config: BartConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, **kwargs): + def __init__(self, config: BartConfig, embed_tokens: Optional[tf.keras.layers.Embedding] = None, **kwargs): super().__init__(**kwargs) self.config = config self.dropout = tf.keras.layers.Dropout(config.dropout) @@ -685,12 +684,6 @@ def __init__(self, config: BartConfig, embed_tokens: Optional[TFSharedEmbeddings self.layers = [TFBartEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)] self.layernorm_embedding = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding") - def get_embed_tokens(self): - return self.embed_tokens - - def set_embed_tokens(self, embed_tokens): - self.embed_tokens = embed_tokens - @unpack_inputs def call( self, @@ -750,7 +743,8 @@ def call( raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: - inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale + with tf.name_scope(self.embed_tokens.name + "/"): + inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) hidden_states = inputs_embeds + embed_pos @@ -820,7 +814,7 @@ class TFBartDecoder(tf.keras.layers.Layer): embed_tokens: output embedding """ - def __init__(self, config: BartConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, **kwargs): + def __init__(self, config: BartConfig, embed_tokens: Optional[tf.keras.layers.Embedding] = None, **kwargs): super().__init__(**kwargs) self.config = config self.padding_idx = config.pad_token_id @@ -837,12 +831,6 @@ def __init__(self, config: BartConfig, embed_tokens: Optional[TFSharedEmbeddings self.dropout = tf.keras.layers.Dropout(config.dropout) - def get_embed_tokens(self): - return self.embed_tokens - - def set_embed_tokens(self, embed_tokens): - self.embed_tokens = embed_tokens - @unpack_inputs def call( self, @@ -943,7 +931,8 @@ def call( positions = self.embed_positions(input_shape, position_ids=position_ids) if inputs_embeds is None: - inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale + with tf.name_scope(self.embed_tokens.name + "/"): + inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale hidden_states = inputs_embeds @@ -1038,36 +1027,19 @@ class TFBartMainLayer(tf.keras.layers.Layer): def __init__(self, config: BartConfig, load_weight_prefix=None, **kwargs): super().__init__(**kwargs) self.config = config - self.shared = TFSharedEmbeddings(config.vocab_size, config.d_model, config.pad_token_id, name="model.shared") - - # set tf scope correctly - if load_weight_prefix is None: - load_weight_prefix = "model.shared" - - with tf.compat.v1.variable_scope(load_weight_prefix) as shared_abs_scope_name: - pass + load_weight_prefix = "model.shared" if load_weight_prefix is None else load_weight_prefix + self.shared = tf.keras.layers.Embedding(config.vocab_size, config.d_model, name=load_weight_prefix) - # Wraps layer to avoid problems with weight restoring and ensuring we're in the correct TF scope. - embed_tokens = TFWrappedEmbeddings(self.shared, abs_scope_name=shared_abs_scope_name) - embed_tokens.vocab_size = self.shared.vocab_size - embed_tokens.hidden_size = self.shared.hidden_size - - self.encoder = TFBartEncoder(config, embed_tokens, name="encoder") - self.decoder = TFBartDecoder(config, embed_tokens, name="decoder") + self.encoder = TFBartEncoder(config, self.shared, name="encoder") + self.decoder = TFBartDecoder(config, self.shared, name="decoder") def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): - self.shared.weight = new_embeddings - self.shared.vocab_size = self.shared.weight.shape[0] - # retrieve correct absolute scope for embed token wrapper - with tf.compat.v1.variable_scope("model.shared") as shared_abs_scope_name: - pass - # Wraps layer to avoid problems with weight restoring and ensuring we're in the correct TF scope. - embed_tokens = TFWrappedEmbeddings(self.shared, abs_scope_name=shared_abs_scope_name) - self.encoder.set_embed_tokens(embed_tokens) - self.decoder.set_embed_tokens(embed_tokens) + self.shared = new_embeddings + self.encoder.embed_tokens = self.shared + self.decoder.embed_tokens = self.shared @unpack_inputs def call( @@ -1273,11 +1245,7 @@ def call(self, x): BART_START_DOCSTRING, ) class TFBartForConditionalGeneration(TFBartPretrainedModel, TFCausalLanguageModelingLoss): - _keys_to_ignore_on_load_unexpected = [ - r"model.encoder.embed_tokens.weight", - r"model.decoder.embed_tokens.weight", - ] - + _keys_to_ignore_on_load_missing = [r"final_logits_bias"] _requires_load_weight_prefix = True def __init__(self, config, load_weight_prefix=None, *inputs, **kwargs): @@ -1303,10 +1271,10 @@ def set_output_embeddings(self, value): self.set_input_embeddings(value) def get_bias(self): - return {"final_logits_bias": self.final_logits_bias} + return {"final_logits_bias": self.bias_layer.bias} def set_bias(self, value): - self.final_logits_bias = value["final_logits_bias"] + self.bias_layer.bias = value["final_logits_bias"] @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @@ -1374,7 +1342,9 @@ def call( return_dict=return_dict, training=training, ) - lm_logits = self.model.shared(outputs[0], mode="linear") + # TODO (joao): the line below is for models with tied embeddings. The previous TFBart had tied embeddings. + # The PT Bart does not have tied embeddings. Untie the weights while keeping loading retrocompatibility. + lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True) lm_logits = self.bias_layer(lm_logits) masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) diff --git a/src/transformers/models/mbart/modeling_tf_mbart.py b/src/transformers/models/mbart/modeling_tf_mbart.py index 47bad2e21eb272..3f6a44fcf4d096 100644 --- a/src/transformers/models/mbart/modeling_tf_mbart.py +++ b/src/transformers/models/mbart/modeling_tf_mbart.py @@ -137,7 +137,8 @@ def call( position_ids = tf.range(seq_len, delta=1, name="range") position_ids += past_key_values_length - return super().call(position_ids + self.offset) + offset_dtype = position_ids.dtype if isinstance(position_ids, tf.Tensor) else tf.int32 + return super().call(position_ids + tf.constant(self.offset, dtype=offset_dtype)) # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->MBart diff --git a/tests/models/bart/test_modeling_tf_bart.py b/tests/models/bart/test_modeling_tf_bart.py index 5e5c5ee592a119..db06c84e0f5b86 100644 --- a/tests/models/bart/test_modeling_tf_bart.py +++ b/tests/models/bart/test_modeling_tf_bart.py @@ -230,69 +230,6 @@ def test_model_common_attributes(self): name = model.get_bias() assert name is None - def test_resize_token_embeddings(self): - config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() - - def _get_word_embedding_weight(model, embedding_layer): - if hasattr(embedding_layer, "weight"): - return embedding_layer.weight - else: - # Here we build the word embeddings weights if not exists. - # And then we retry to get the attribute once built. - model(model.dummy_inputs) - if hasattr(embedding_layer, "weight"): - return embedding_layer.weight - else: - return None - - for model_class in self.all_model_classes: - for size in [config.vocab_size - 10, config.vocab_size + 10, None]: - # build the embeddings - model = model_class(config=config) - old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings()) - old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings()) - old_final_logits_bias = model.get_bias() - - # reshape the embeddings - model.resize_token_embeddings(size) - new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings()) - new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings()) - new_final_logits_bias = model.get_bias() - - # check that the resized embeddings size matches the desired size. - assert_size = size if size is not None else config.vocab_size - - self.assertEqual(new_input_embeddings.shape[0], assert_size) - - # check that weights remain the same after resizing - models_equal = True - for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()): - if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: - models_equal = False - self.assertTrue(models_equal) - - if old_output_embeddings is not None and new_output_embeddings is not None: - self.assertEqual(new_output_embeddings.shape[0], assert_size) - - models_equal = True - for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()): - if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: - models_equal = False - self.assertTrue(models_equal) - - if old_final_logits_bias is not None and new_final_logits_bias is not None: - old_final_logits_bias = old_final_logits_bias["final_logits_bias"] - new_final_logits_bias = new_final_logits_bias["final_logits_bias"] - self.assertEqual(new_final_logits_bias.shape[0], 1) - self.assertEqual(new_final_logits_bias.shape[1], assert_size) - - models_equal = True - for old, new in zip(old_final_logits_bias.value(), new_final_logits_bias.value()): - for p1, p2 in zip(old, new): - if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: - models_equal = False - self.assertTrue(models_equal) - @tooslow def test_saved_model_creation(self): pass @@ -635,7 +572,7 @@ def xsum_1_1_model(self): def test_xsum_1_1_generation(self): model = self.xsum_1_1_model - assert model.model.decoder.embed_tokens._layer == model.model.shared + assert model.model.decoder.embed_tokens == model.model.shared ARTICLE = ( "The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The" @@ -685,7 +622,7 @@ def test_xsum_1_1_generation(self): def test_xsum_1_1_xla_generation(self): # same test as above, but with `no_repeat_ngram_size=0` (not compatible with XLA) and XLA comparison enabled model = self.xsum_1_1_model - assert model.model.decoder.embed_tokens._layer == model.model.shared + assert model.model.decoder.embed_tokens == model.model.shared ARTICLE = ( "The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The" diff --git a/tests/test_modeling_tf_common.py b/tests/test_modeling_tf_common.py index e1b21788e2475c..ca8840d2aafc15 100644 --- a/tests/test_modeling_tf_common.py +++ b/tests/test_modeling_tf_common.py @@ -1144,30 +1144,20 @@ def prepare_numpy_arrays(inputs_dict): self.assert_outputs_same(output_for_dict_input, output_for_kw_input) def test_resize_token_embeddings(self): + # TODO (joao): after the embeddings refactor is complete, rework this test so as to rely exclusively on + # tf.keras.layers.Embedding + if not self.test_resize_embeddings: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(model, embedding_layer): - embeds = getattr(embedding_layer, "weight", None) - if embeds is not None: - return embeds - - embeds = getattr(embedding_layer, "decoder", None) - if embeds is not None: - return embeds - - model(model.dummy_inputs) - - embeds = getattr(embedding_layer, "weight", None) - if embeds is not None: - return embeds - - embeds = getattr(embedding_layer, "decoder", None) - if embeds is not None: - return embeds - - return None + if isinstance(embedding_layer, tf.keras.layers.Embedding): + # builds the embeddings layer + model(model.dummy_inputs) + return embedding_layer.embeddings + else: + return model._get_word_embedding_weight(embedding_layer) for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10, None]: @@ -1195,10 +1185,10 @@ def _get_word_embedding_weight(model, embedding_layer): if old_bias is not None and new_bias is not None: for old_weight, new_weight in zip(old_bias.values(), new_bias.values()): - self.assertEqual(new_weight.shape[0], assert_size) + self.assertEqual(new_weight.shape[-1], assert_size) models_equal = True - for p1, p2 in zip(old_weight.value(), new_weight.value()): + for p1, p2 in zip(tf.squeeze(old_weight), tf.squeeze(new_weight)): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) From d8cf3b20875baee97f4bea64ffd17670aa57c37b Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Sat, 10 Sep 2022 13:33:01 +0100 Subject: [PATCH 254/539] TF: unpin maximum TF version (#18917) --- docker/transformers-all-latest-gpu/Dockerfile | 2 +- docker/transformers-cpu/Dockerfile | 4 ++-- docker/transformers-gpu/Dockerfile | 2 +- docker/transformers-tensorflow-cpu/Dockerfile | 4 ++-- docker/transformers-tensorflow-gpu/Dockerfile | 2 +- setup.py | 4 ++-- src/transformers/dependency_versions_table.py | 4 ++-- 7 files changed, 11 insertions(+), 11 deletions(-) diff --git a/docker/transformers-all-latest-gpu/Dockerfile b/docker/transformers-all-latest-gpu/Dockerfile index 502c9a61fd6c4a..4db6f51826f02b 100644 --- a/docker/transformers-all-latest-gpu/Dockerfile +++ b/docker/transformers-all-latest-gpu/Dockerfile @@ -32,7 +32,7 @@ RUN echo torch=$VERSION # TODO: We might need to specify proper versions that work with a specific torch version (especially for past CI). RUN [ "$PYTORCH" != "pre" ] && python3 -m pip install --no-cache-dir -U $VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/$CUDA || python3 -m pip install --no-cache-dir -U --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/$CUDA -RUN python3 -m pip install --no-cache-dir -U tensorflow==2.9.1 +RUN python3 -m pip install --no-cache-dir -U tensorflow RUN python3 -m pip uninstall -y flax jax # Use installed torch version for `torch-scatter` to avid to deal with PYTORCH='pre'. diff --git a/docker/transformers-cpu/Dockerfile b/docker/transformers-cpu/Dockerfile index 75a4f20a3b18fd..c3590e4239e470 100644 --- a/docker/transformers-cpu/Dockerfile +++ b/docker/transformers-cpu/Dockerfile @@ -15,7 +15,7 @@ RUN apt update && \ RUN python3 -m pip install --no-cache-dir --upgrade pip && \ python3 -m pip install --no-cache-dir \ jupyter \ - tensorflow-cpu==2.9.1 \ + tensorflow-cpu \ torch WORKDIR /workspace @@ -23,4 +23,4 @@ COPY . transformers/ RUN cd transformers/ && \ python3 -m pip install --no-cache-dir . -CMD ["/bin/bash"] \ No newline at end of file +CMD ["/bin/bash"] diff --git a/docker/transformers-gpu/Dockerfile b/docker/transformers-gpu/Dockerfile index fc5c818438ba25..0212eaa2a72b26 100644 --- a/docker/transformers-gpu/Dockerfile +++ b/docker/transformers-gpu/Dockerfile @@ -15,7 +15,7 @@ RUN apt update && \ RUN python3 -m pip install --no-cache-dir --upgrade pip && \ python3 -m pip install --no-cache-dir \ jupyter \ - tensorflow==2.9.1 \ + tensorflow \ torch RUN git clone https://github.com/NVIDIA/apex diff --git a/docker/transformers-tensorflow-cpu/Dockerfile b/docker/transformers-tensorflow-cpu/Dockerfile index dbc81acbbb25a4..ef3dc3d212cbbc 100644 --- a/docker/transformers-tensorflow-cpu/Dockerfile +++ b/docker/transformers-tensorflow-cpu/Dockerfile @@ -15,11 +15,11 @@ RUN apt update && \ RUN python3 -m pip install --no-cache-dir --upgrade pip && \ python3 -m pip install --no-cache-dir \ mkl \ - tensorflow-cpu==2.9.1 + tensorflow-cpu WORKDIR /workspace COPY . transformers/ RUN cd transformers/ && \ python3 -m pip install --no-cache-dir . -CMD ["/bin/bash"] \ No newline at end of file +CMD ["/bin/bash"] diff --git a/docker/transformers-tensorflow-gpu/Dockerfile b/docker/transformers-tensorflow-gpu/Dockerfile index a24faad8f2fcfb..a05ace7d08e268 100644 --- a/docker/transformers-tensorflow-gpu/Dockerfile +++ b/docker/transformers-tensorflow-gpu/Dockerfile @@ -12,7 +12,7 @@ RUN git clone https://github.com/huggingface/transformers && cd transformers && RUN python3 -m pip install --no-cache-dir -e ./transformers[dev-tensorflow,testing] # If set to nothing, will install the latest version -ARG TENSORFLOW='2.9.1' +ARG TENSORFLOW='' RUN [ ${#TENSORFLOW} -gt 0 ] && VERSION='tensorflow=='$TENSORFLOW'.*' || VERSION='tensorflow'; python3 -m pip install --no-cache-dir -U $VERSION RUN python3 -m pip uninstall -y torch flax diff --git a/setup.py b/setup.py index 3aa0a86a4f607e..3145272ef6f061 100644 --- a/setup.py +++ b/setup.py @@ -154,8 +154,8 @@ "sigopt", "librosa", "starlette", - "tensorflow-cpu>=2.3,<2.10", - "tensorflow>=2.3,<2.10", + "tensorflow-cpu>=2.3", + "tensorflow>=2.3", "tensorflow-text", "tf2onnx", "timeout-decorator", diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py index 58e4a2cd42c372..434b87048405f6 100644 --- a/src/transformers/dependency_versions_table.py +++ b/src/transformers/dependency_versions_table.py @@ -60,8 +60,8 @@ "sigopt": "sigopt", "librosa": "librosa", "starlette": "starlette", - "tensorflow-cpu": "tensorflow-cpu>=2.3,<2.10", - "tensorflow": "tensorflow>=2.3,<2.10", + "tensorflow-cpu": "tensorflow-cpu>=2.3", + "tensorflow": "tensorflow>=2.3", "tensorflow-text": "tensorflow-text", "tf2onnx": "tf2onnx", "timeout-decorator": "timeout-decorator", From a26114777ee1c2802e91bd9cb26a3b39974d52ba Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Sat, 10 Sep 2022 09:11:46 -0400 Subject: [PATCH 255/539] Revert "TF: unpin maximum TF version (#18917)" (#18972) This reverts commit d8cf3b20875baee97f4bea64ffd17670aa57c37b. --- docker/transformers-all-latest-gpu/Dockerfile | 2 +- docker/transformers-cpu/Dockerfile | 4 ++-- docker/transformers-gpu/Dockerfile | 2 +- docker/transformers-tensorflow-cpu/Dockerfile | 4 ++-- docker/transformers-tensorflow-gpu/Dockerfile | 2 +- setup.py | 4 ++-- src/transformers/dependency_versions_table.py | 4 ++-- 7 files changed, 11 insertions(+), 11 deletions(-) diff --git a/docker/transformers-all-latest-gpu/Dockerfile b/docker/transformers-all-latest-gpu/Dockerfile index 4db6f51826f02b..502c9a61fd6c4a 100644 --- a/docker/transformers-all-latest-gpu/Dockerfile +++ b/docker/transformers-all-latest-gpu/Dockerfile @@ -32,7 +32,7 @@ RUN echo torch=$VERSION # TODO: We might need to specify proper versions that work with a specific torch version (especially for past CI). RUN [ "$PYTORCH" != "pre" ] && python3 -m pip install --no-cache-dir -U $VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/$CUDA || python3 -m pip install --no-cache-dir -U --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/$CUDA -RUN python3 -m pip install --no-cache-dir -U tensorflow +RUN python3 -m pip install --no-cache-dir -U tensorflow==2.9.1 RUN python3 -m pip uninstall -y flax jax # Use installed torch version for `torch-scatter` to avid to deal with PYTORCH='pre'. diff --git a/docker/transformers-cpu/Dockerfile b/docker/transformers-cpu/Dockerfile index c3590e4239e470..75a4f20a3b18fd 100644 --- a/docker/transformers-cpu/Dockerfile +++ b/docker/transformers-cpu/Dockerfile @@ -15,7 +15,7 @@ RUN apt update && \ RUN python3 -m pip install --no-cache-dir --upgrade pip && \ python3 -m pip install --no-cache-dir \ jupyter \ - tensorflow-cpu \ + tensorflow-cpu==2.9.1 \ torch WORKDIR /workspace @@ -23,4 +23,4 @@ COPY . transformers/ RUN cd transformers/ && \ python3 -m pip install --no-cache-dir . -CMD ["/bin/bash"] +CMD ["/bin/bash"] \ No newline at end of file diff --git a/docker/transformers-gpu/Dockerfile b/docker/transformers-gpu/Dockerfile index 0212eaa2a72b26..fc5c818438ba25 100644 --- a/docker/transformers-gpu/Dockerfile +++ b/docker/transformers-gpu/Dockerfile @@ -15,7 +15,7 @@ RUN apt update && \ RUN python3 -m pip install --no-cache-dir --upgrade pip && \ python3 -m pip install --no-cache-dir \ jupyter \ - tensorflow \ + tensorflow==2.9.1 \ torch RUN git clone https://github.com/NVIDIA/apex diff --git a/docker/transformers-tensorflow-cpu/Dockerfile b/docker/transformers-tensorflow-cpu/Dockerfile index ef3dc3d212cbbc..dbc81acbbb25a4 100644 --- a/docker/transformers-tensorflow-cpu/Dockerfile +++ b/docker/transformers-tensorflow-cpu/Dockerfile @@ -15,11 +15,11 @@ RUN apt update && \ RUN python3 -m pip install --no-cache-dir --upgrade pip && \ python3 -m pip install --no-cache-dir \ mkl \ - tensorflow-cpu + tensorflow-cpu==2.9.1 WORKDIR /workspace COPY . transformers/ RUN cd transformers/ && \ python3 -m pip install --no-cache-dir . -CMD ["/bin/bash"] +CMD ["/bin/bash"] \ No newline at end of file diff --git a/docker/transformers-tensorflow-gpu/Dockerfile b/docker/transformers-tensorflow-gpu/Dockerfile index a05ace7d08e268..a24faad8f2fcfb 100644 --- a/docker/transformers-tensorflow-gpu/Dockerfile +++ b/docker/transformers-tensorflow-gpu/Dockerfile @@ -12,7 +12,7 @@ RUN git clone https://github.com/huggingface/transformers && cd transformers && RUN python3 -m pip install --no-cache-dir -e ./transformers[dev-tensorflow,testing] # If set to nothing, will install the latest version -ARG TENSORFLOW='' +ARG TENSORFLOW='2.9.1' RUN [ ${#TENSORFLOW} -gt 0 ] && VERSION='tensorflow=='$TENSORFLOW'.*' || VERSION='tensorflow'; python3 -m pip install --no-cache-dir -U $VERSION RUN python3 -m pip uninstall -y torch flax diff --git a/setup.py b/setup.py index 3145272ef6f061..3aa0a86a4f607e 100644 --- a/setup.py +++ b/setup.py @@ -154,8 +154,8 @@ "sigopt", "librosa", "starlette", - "tensorflow-cpu>=2.3", - "tensorflow>=2.3", + "tensorflow-cpu>=2.3,<2.10", + "tensorflow>=2.3,<2.10", "tensorflow-text", "tf2onnx", "timeout-decorator", diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py index 434b87048405f6..58e4a2cd42c372 100644 --- a/src/transformers/dependency_versions_table.py +++ b/src/transformers/dependency_versions_table.py @@ -60,8 +60,8 @@ "sigopt": "sigopt", "librosa": "librosa", "starlette": "starlette", - "tensorflow-cpu": "tensorflow-cpu>=2.3", - "tensorflow": "tensorflow>=2.3", + "tensorflow-cpu": "tensorflow-cpu>=2.3,<2.10", + "tensorflow": "tensorflow>=2.3,<2.10", "tensorflow-text": "tensorflow-text", "tf2onnx": "tf2onnx", "timeout-decorator": "timeout-decorator", From 9faa9f9dacf8c818ab2513da3ef92ce66f39515d Mon Sep 17 00:00:00 2001 From: Shijie Wu Date: Mon, 12 Sep 2022 05:00:24 -0400 Subject: [PATCH 256/539] remove unused activation dropout (#18842) --- src/transformers/models/opt/configuration_opt.py | 4 ---- src/transformers/models/opt/modeling_opt.py | 2 -- 2 files changed, 6 deletions(-) diff --git a/src/transformers/models/opt/configuration_opt.py b/src/transformers/models/opt/configuration_opt.py index a101bb3e866ff2..bdba8d37428d2c 100644 --- a/src/transformers/models/opt/configuration_opt.py +++ b/src/transformers/models/opt/configuration_opt.py @@ -67,8 +67,6 @@ class OPTConfig(PretrainedConfig): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. - activation_dropout (`float`, *optional*, defaults to 0.0): - The dropout ratio for activations inside the fully connected layer. layerdrop: (`float`, *optional*, defaults to 0.0): The LayerDrop probability. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. @@ -106,7 +104,6 @@ def __init__( word_embed_proj_dim=None, dropout=0.1, attention_dropout=0.0, - activation_dropout=0.0, num_attention_heads=12, activation_function="relu", layerdrop=0.0, @@ -132,7 +129,6 @@ def __init__( self.num_hidden_layers = num_hidden_layers self.dropout = dropout self.attention_dropout = attention_dropout - self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.layerdrop = layerdrop diff --git a/src/transformers/models/opt/modeling_opt.py b/src/transformers/models/opt/modeling_opt.py index 419c2391e4c708..b525498f8a1a2d 100644 --- a/src/transformers/models/opt/modeling_opt.py +++ b/src/transformers/models/opt/modeling_opt.py @@ -281,8 +281,6 @@ def __init__(self, config: OPTConfig): self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] - self.activation_dropout = config.activation_dropout - self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.ffn_dim) self.fc2 = nn.Linear(config.ffn_dim, self.embed_dim) From a86acb75ad832fd604a1d5b5e5089f299aae5df4 Mon Sep 17 00:00:00 2001 From: "Wang, Yi" Date: Mon, 12 Sep 2022 19:37:25 +0800 Subject: [PATCH 257/539] add DDP HPO support for sigopt (#18931) only main_process will have HPO, and pass argument to other process Signed-off-by: Wang, Yi A Signed-off-by: Wang, Yi A --- src/transformers/integrations.py | 127 +++++++++++++++++++------------ 1 file changed, 79 insertions(+), 48 deletions(-) diff --git a/src/transformers/integrations.py b/src/transformers/integrations.py index 89493f779087b0..73ff50453b6f43 100644 --- a/src/transformers/integrations.py +++ b/src/transformers/integrations.py @@ -19,6 +19,7 @@ import json import numbers import os +import pickle import shutil import sys import tempfile @@ -28,11 +29,13 @@ import numpy as np from . import __version__ as version -from .utils import flatten_dict, is_datasets_available, logging +from .utils import flatten_dict, is_datasets_available, is_torch_available, logging logger = logging.get_logger(__name__) +if is_torch_available(): + import torch # comet_ml requires to be imported before any ML frameworks _has_comet = importlib.util.find_spec("comet_ml") is not None and os.getenv("COMET_MODE", "").upper() != "DISABLED" @@ -55,6 +58,7 @@ from .trainer_callback import ProgressCallback, TrainerCallback # noqa: E402 from .trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy # noqa: E402 +from .training_args import ParallelMode # noqa: E402 from .utils import ENV_VARS_TRUE_VALUES, is_torch_tpu_available # noqa: E402 @@ -317,67 +321,94 @@ def run_hp_search_sigopt(trainer, n_trials: int, direction: str, **kwargs) -> Be import sigopt from transformers.utils.versions import importlib_metadata - if importlib_metadata.version("sigopt") >= "8.0.0": - sigopt.set_project("huggingface") - - experiment = sigopt.create_experiment( - name="huggingface-tune", - type="offline", - parameters=trainer.hp_space(None), - metrics=[dict(name="objective", objective=direction, strategy="optimize")], - parallel_bandwidth=1, - budget=n_trials, - ) + if trainer.args.process_index == 0: + if importlib_metadata.version("sigopt") >= "8.0.0": + sigopt.set_project("huggingface") + + experiment = sigopt.create_experiment( + name="huggingface-tune", + type="offline", + parameters=trainer.hp_space(None), + metrics=[dict(name="objective", objective=direction, strategy="optimize")], + parallel_bandwidth=1, + budget=n_trials, + ) - logger.info(f"created experiment: https://app.sigopt.com/experiment/{experiment.id}") + logger.info(f"created experiment: https://app.sigopt.com/experiment/{experiment.id}") + + for run in experiment.loop(): + with run: + trainer.objective = None + trainer._hp_search_setup(run.run) + if trainer.args.world_size > 1: + if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: + raise RuntimeError("only support DDP Sigopt HPO for ParallelMode.DISTRIBUTED currently.") + torch.distributed.broadcast_object_list(pickle.dumps(trainer.args), src=0) + trainer.train(resume_from_checkpoint=None) + # If there hasn't been any evaluation during the training loop. + if getattr(trainer, "objective", None) is None: + metrics = trainer.evaluate() + trainer.objective = trainer.compute_objective(metrics) + run.log_metric("objective", trainer.objective) + + best = list(experiment.get_best_runs())[0] + best_run = BestRun(best.id, best.values["objective"].value, best.assignments) + else: + from sigopt import Connection + + conn = Connection() + proxies = kwargs.pop("proxies", None) + if proxies is not None: + conn.set_proxies(proxies) + + experiment = conn.experiments().create( + name="huggingface-tune", + parameters=trainer.hp_space(None), + metrics=[dict(name="objective", objective=direction, strategy="optimize")], + parallel_bandwidth=1, + observation_budget=n_trials, + project="huggingface", + ) + logger.info(f"created experiment: https://app.sigopt.com/experiment/{experiment.id}") - for run in experiment.loop(): - with run: + while experiment.progress.observation_count < experiment.observation_budget: + suggestion = conn.experiments(experiment.id).suggestions().create() trainer.objective = None - trainer.train(resume_from_checkpoint=None, trial=run.run) + trainer._hp_search_setup(suggestion) + if trainer.args.world_size > 1: + if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: + raise RuntimeError("only support DDP Sigopt HPO for ParallelMode.DISTRIBUTED currently.") + torch.distributed.broadcast_object_list(pickle.dumps(trainer.args), src=0) + trainer.train(resume_from_checkpoint=None) # If there hasn't been any evaluation during the training loop. if getattr(trainer, "objective", None) is None: metrics = trainer.evaluate() trainer.objective = trainer.compute_objective(metrics) - run.log_metric("objective", trainer.objective) - best = list(experiment.get_best_runs())[0] - best_run = BestRun(best.id, best.values["objective"].value, best.assignments) - else: - from sigopt import Connection - - conn = Connection() - proxies = kwargs.pop("proxies", None) - if proxies is not None: - conn.set_proxies(proxies) - - experiment = conn.experiments().create( - name="huggingface-tune", - parameters=trainer.hp_space(None), - metrics=[dict(name="objective", objective=direction, strategy="optimize")], - parallel_bandwidth=1, - observation_budget=n_trials, - project="huggingface", - ) - logger.info(f"created experiment: https://app.sigopt.com/experiment/{experiment.id}") + values = [dict(name="objective", value=trainer.objective)] + obs = conn.experiments(experiment.id).observations().create(suggestion=suggestion.id, values=values) + logger.info(f"[suggestion_id, observation_id]: [{suggestion.id}, {obs.id}]") + experiment = conn.experiments(experiment.id).fetch() - while experiment.progress.observation_count < experiment.observation_budget: - suggestion = conn.experiments(experiment.id).suggestions().create() + best = list(conn.experiments(experiment.id).best_assignments().fetch().iterate_pages())[0] + best_run = BestRun(best.id, best.value, best.assignments) + return best_run + else: + for i in range(n_trials): trainer.objective = None - trainer.train(resume_from_checkpoint=None, trial=suggestion) + args_main_rank = list(pickle.dumps(trainer.args)) + if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: + raise RuntimeError("only support DDP Sigopt HPO for ParallelMode.DISTRIBUTED currently.") + torch.distributed.broadcast_object_list(args_main_rank, src=0) + local_rank = trainer.args.local_rank # backup the local_rank info + trainer.args = pickle.loads(bytes(args_main_rank)) + trainer.args.local_rank = local_rank + trainer.train(resume_from_checkpoint=None) # If there hasn't been any evaluation during the training loop. if getattr(trainer, "objective", None) is None: metrics = trainer.evaluate() trainer.objective = trainer.compute_objective(metrics) - - values = [dict(name="objective", value=trainer.objective)] - obs = conn.experiments(experiment.id).observations().create(suggestion=suggestion.id, values=values) - logger.info(f"[suggestion_id, observation_id]: [{suggestion.id}, {obs.id}]") - experiment = conn.experiments(experiment.id).fetch() - - best = list(conn.experiments(experiment.id).best_assignments().fetch().iterate_pages())[0] - best_run = BestRun(best.id, best.value, best.assignments) - return best_run + return None def run_hp_search_wandb(trainer, n_trials: int, direction: str, **kwargs) -> BestRun: From 0b36970371c7848c02d50b6b7fd5d2a53fd6ef74 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 12 Sep 2022 15:19:48 +0200 Subject: [PATCH 258/539] Remove `decoder_position_ids` from `check_decoder_model_past_large_inputs` (#18980) Co-authored-by: ydshieh --- tests/models/bart/test_modeling_tf_bart.py | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/tests/models/bart/test_modeling_tf_bart.py b/tests/models/bart/test_modeling_tf_bart.py index db06c84e0f5b86..69cf530ee6b322 100644 --- a/tests/models/bart/test_modeling_tf_bart.py +++ b/tests/models/bart/test_modeling_tf_bart.py @@ -125,21 +125,10 @@ def check_decoder_model_past_large_inputs(self, config, inputs_dict): next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1) - decoder_position_ids = tf.cast(tf.cumsum(next_attention_mask, axis=1, exclusive=True), dtype=tf.int32) - output_from_no_past = model( - next_input_ids, attention_mask=next_attention_mask, position_ids=decoder_position_ids - ) + output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask) output_from_no_past = output_from_no_past[0] - decoder_position_ids = ( - tf.cast(tf.cumsum(next_attn_mask, axis=1, exclusive=True), dtype=tf.int32) + past_key_values[0][0].shape[2] - ) - output_from_past = model( - next_tokens, - attention_mask=next_attention_mask, - past_key_values=past_key_values, - position_ids=decoder_position_ids, - ) + output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values) output_from_past = output_from_past[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) From 367026000bbe9957f95eb1eb7d9649d78ac0b468 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 12 Sep 2022 15:20:31 +0200 Subject: [PATCH 259/539] create Past CI results as tables for GitHub issue (#18953) * create Past CI results as tables for GitHub issue Co-authored-by: ydshieh --- utils/get_ci_error_statistics.py | 87 ++++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) diff --git a/utils/get_ci_error_statistics.py b/utils/get_ci_error_statistics.py index bca425663b9d06..9d0beeaaca37b6 100644 --- a/utils/get_ci_error_statistics.py +++ b/utils/get_ci_error_statistics.py @@ -107,6 +107,79 @@ def get_all_errors(artifact_dir): return errors, failed_tests +def reduce_by_error(logs, error_filter=None): + """count each error""" + + counter = Counter() + counter.update([x[1] for x in logs]) + counts = counter.most_common() + r = {} + for error, count in counts: + if error_filter is None or error not in error_filter: + r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]} + + r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True)) + return r + + +def get_model(test): + """Get the model name from a test method""" + test = test.split("::")[0] + if test.startswith("tests/models/"): + test = test.split("/")[2] + else: + test = None + + return test + + +def reduce_by_model(logs, error_filter=None): + """count each error per model""" + + logs = [(x[0], x[1], get_model(x[2])) for x in logs] + logs = [x for x in logs if x[2] is not None] + tests = set([x[2] for x in logs]) + + r = {} + for test in tests: + counter = Counter() + # count by errors in `test` + counter.update([x[1] for x in logs if x[2] == test]) + counts = counter.most_common() + error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} + n_errors = sum(error_counts.values()) + if n_errors > 0: + r[test] = {"count": n_errors, "errors": error_counts} + + r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True)) + return r + + +def make_github_table(reduced_by_error): + header = "| no. | error |" + sep = "|-:|:-|" + lines = [header, sep] + for error in reduced_by_error: + count = reduced_by_error[error]["count"] + line = f"| {count} | {error[:100]} |" + lines.append(line) + + return "\n".join(lines) + + +def make_github_table_per_model(reduced_by_model): + header = "| model | no. of errors | major error | count |" + sep = "|-:|-:|-:|-:|" + lines = [header, sep] + for model in reduced_by_model: + count = reduced_by_model[model]["count"] + error, _count = list(reduced_by_model[model]["errors"].items())[0] + line = f"| {model} | {count} | {error[:60]} | {_count} |" + lines.append(line) + + return "\n".join(lines) + + if __name__ == "__main__": parser = argparse.ArgumentParser() @@ -152,3 +225,17 @@ def get_all_errors(artifact_dir): with open(os.path.join(args.output_dir, "failed_tests.json"), "w", encoding="UTF-8") as fp: json.dump(failed_tests, fp, ensure_ascii=False, indent=4) + + # Produce tables for GitHub issue. + logs = [(error_line, error, failed_test) for (error_line, error), failed_test in zip(errors, failed_tests)] + + reduced_by_error = reduce_by_error(logs) + reduced_by_model = reduce_by_model(logs) + + s1 = make_github_table(reduced_by_error) + s2 = make_github_table_per_model(reduced_by_model) + + with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp: + fp.write(s1) + with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp: + fp.write(s2) From adbf3a40de3524dcdce556914e2cb974d81854e5 Mon Sep 17 00:00:00 2001 From: Shijie Wu Date: Mon, 12 Sep 2022 10:32:38 -0400 Subject: [PATCH 260/539] Remove dropout in embedding layer of OPT (#18845) --- src/transformers/models/opt/modeling_flax_opt.py | 2 -- src/transformers/models/opt/modeling_opt.py | 1 - src/transformers/models/opt/modeling_tf_opt.py | 1 - 3 files changed, 4 deletions(-) diff --git a/src/transformers/models/opt/modeling_flax_opt.py b/src/transformers/models/opt/modeling_flax_opt.py index 5762fae14b0957..adb38f4138aa07 100644 --- a/src/transformers/models/opt/modeling_flax_opt.py +++ b/src/transformers/models/opt/modeling_flax_opt.py @@ -484,8 +484,6 @@ def __call__( hidden_states = inputs_embeds + positions - hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) - hidden_state, all_hidden_states, attentions = self.layers( hidden_states, attention_mask, diff --git a/src/transformers/models/opt/modeling_opt.py b/src/transformers/models/opt/modeling_opt.py index b525498f8a1a2d..567e08d36f3939 100644 --- a/src/transformers/models/opt/modeling_opt.py +++ b/src/transformers/models/opt/modeling_opt.py @@ -637,7 +637,6 @@ def forward( inputs_embeds = self.project_in(inputs_embeds) hidden_states = inputs_embeds + pos_embeds - hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # decoder layers all_hidden_states = () if output_hidden_states else None diff --git a/src/transformers/models/opt/modeling_tf_opt.py b/src/transformers/models/opt/modeling_tf_opt.py index 633e972069ee58..cfcd34ba1aff52 100644 --- a/src/transformers/models/opt/modeling_tf_opt.py +++ b/src/transformers/models/opt/modeling_tf_opt.py @@ -652,7 +652,6 @@ def call( inputs_embeds = self.project_in(inputs_embeds) hidden_states = inputs_embeds + pos_embeds - hidden_states = self.dropout(hidden_states, training=training) # decoder layers all_hidden_states = () if output_hidden_states else None From cf450b776f1205c9938b978ed1e6913277eeb930 Mon Sep 17 00:00:00 2001 From: Matt Date: Mon, 12 Sep 2022 16:33:56 +0100 Subject: [PATCH 261/539] Fix TF start docstrings (#18991) * Update our TF 2.0 input format tip across all models * make style --- .../models/albert/modeling_tf_albert.py | 21 ++++++---- .../models/bart/modeling_tf_bart.py | 19 ++++++---- .../models/bert/modeling_tf_bert.py | 21 ++++++---- .../blenderbot/modeling_tf_blenderbot.py | 19 ++++++---- .../modeling_tf_blenderbot_small.py | 19 ++++++---- .../models/camembert/modeling_tf_camembert.py | 21 ++++++---- .../models/clip/modeling_tf_clip.py | 23 ++++++----- .../models/convbert/modeling_tf_convbert.py | 21 ++++++---- .../models/convnext/modeling_tf_convnext.py | 24 +++++++++--- .../models/ctrl/modeling_tf_ctrl.py | 21 ++++++---- .../data2vec/modeling_tf_data2vec_vision.py | 24 +++++++++--- .../models/deberta/modeling_tf_deberta.py | 21 ++++++---- .../deberta_v2/modeling_tf_deberta_v2.py | 21 ++++++---- .../distilbert/modeling_tf_distilbert.py | 25 +++++++----- .../models/dpr/modeling_tf_dpr.py | 21 ++++++---- .../models/electra/modeling_tf_electra.py | 21 ++++++---- .../models/flaubert/modeling_tf_flaubert.py | 21 ++++++---- .../models/funnel/modeling_tf_funnel.py | 21 ++++++---- .../models/gpt2/modeling_tf_gpt2.py | 21 ++++++---- .../models/gptj/modeling_tf_gptj.py | 21 ++++++---- .../models/hubert/modeling_tf_hubert.py | 21 ++++++---- .../models/layoutlm/modeling_tf_layoutlm.py | 21 ++++++---- .../layoutlmv3/modeling_tf_layoutlmv3.py | 24 +++++++++--- .../models/led/modeling_tf_led.py | 19 ++++++---- .../longformer/modeling_tf_longformer.py | 21 ++++++---- .../models/lxmert/modeling_tf_lxmert.py | 21 ++++++---- .../models/marian/modeling_tf_marian.py | 19 ++++++---- .../models/mbart/modeling_tf_mbart.py | 19 ++++++---- .../mobilebert/modeling_tf_mobilebert.py | 21 ++++++---- .../models/mobilevit/modeling_tf_mobilevit.py | 24 +++++++++--- .../models/mpnet/modeling_tf_mpnet.py | 25 +++++++----- .../models/openai/modeling_tf_openai.py | 21 ++++++---- .../models/opt/modeling_tf_opt.py | 19 ++++++---- .../models/pegasus/modeling_tf_pegasus.py | 19 ++++++---- .../models/rembert/modeling_tf_rembert.py | 21 ++++++---- .../models/roberta/modeling_tf_roberta.py | 21 ++++++---- .../models/roformer/modeling_tf_roformer.py | 21 ++++++---- .../modeling_tf_speech_to_text.py | 19 ++++++---- src/transformers/models/t5/modeling_tf_t5.py | 21 ++++++---- .../models/tapas/modeling_tf_tapas.py | 21 ++++++---- .../transfo_xl/modeling_tf_transfo_xl.py | 21 ++++++---- .../models/vit/modeling_tf_vit.py | 24 +++++++++--- .../models/vit_mae/modeling_tf_vit_mae.py | 24 +++++++++--- .../models/wav2vec2/modeling_tf_wav2vec2.py | 21 ++++++---- .../models/xglm/modeling_tf_xglm.py | 19 ++++++---- .../models/xlm/modeling_tf_xlm.py | 21 ++++++---- .../xlm_roberta/modeling_tf_xlm_roberta.py | 21 ++++++---- .../models/xlnet/modeling_tf_xlnet.py | 21 ++++++---- ...tf_{{cookiecutter.lowercase_modelname}}.py | 38 +++++++++++-------- 49 files changed, 678 insertions(+), 376 deletions(-) diff --git a/src/transformers/models/albert/modeling_tf_albert.py b/src/transformers/models/albert/modeling_tf_albert.py index b07ddf4762a2c8..5bef3cc39c00ab 100644 --- a/src/transformers/models/albert/modeling_tf_albert.py +++ b/src/transformers/models/albert/modeling_tf_albert.py @@ -694,23 +694,28 @@ class TFAlbertForPreTrainingOutput(ModelOutput): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Args: diff --git a/src/transformers/models/bart/modeling_tf_bart.py b/src/transformers/models/bart/modeling_tf_bart.py index 17c0ce7a710502..45aef4768b176c 100644 --- a/src/transformers/models/bart/modeling_tf_bart.py +++ b/src/transformers/models/bart/modeling_tf_bart.py @@ -514,16 +514,17 @@ def serving(self, inputs): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. - - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: @@ -531,6 +532,10 @@ def serving(self, inputs): - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Args: diff --git a/src/transformers/models/bert/modeling_tf_bert.py b/src/transformers/models/bert/modeling_tf_bert.py index aad730dc11d9e2..8ab88b9730418d 100644 --- a/src/transformers/models/bert/modeling_tf_bert.py +++ b/src/transformers/models/bert/modeling_tf_bert.py @@ -964,23 +964,28 @@ class TFBertForPreTrainingOutput(ModelOutput): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Args: diff --git a/src/transformers/models/blenderbot/modeling_tf_blenderbot.py b/src/transformers/models/blenderbot/modeling_tf_blenderbot.py index 2f4ee837678433..4bd4cd481b3e1d 100644 --- a/src/transformers/models/blenderbot/modeling_tf_blenderbot.py +++ b/src/transformers/models/blenderbot/modeling_tf_blenderbot.py @@ -514,16 +514,17 @@ def serving(self, inputs): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. - - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: @@ -531,6 +532,10 @@ def serving(self, inputs): - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Args: diff --git a/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py b/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py index 4472539d282a51..84eddc2cc35e1a 100644 --- a/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py +++ b/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py @@ -514,16 +514,17 @@ def serving(self, inputs): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. - - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: @@ -531,6 +532,10 @@ def serving(self, inputs): - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Args: diff --git a/src/transformers/models/camembert/modeling_tf_camembert.py b/src/transformers/models/camembert/modeling_tf_camembert.py index b773bb761d048c..708282188bbbaa 100644 --- a/src/transformers/models/camembert/modeling_tf_camembert.py +++ b/src/transformers/models/camembert/modeling_tf_camembert.py @@ -47,23 +47,28 @@ - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Parameters: diff --git a/src/transformers/models/clip/modeling_tf_clip.py b/src/transformers/models/clip/modeling_tf_clip.py index d302f9c7c16b1b..94656a0b39ab51 100644 --- a/src/transformers/models/clip/modeling_tf_clip.py +++ b/src/transformers/models/clip/modeling_tf_clip.py @@ -913,22 +913,27 @@ class TFCLIPPreTrainedModel(TFPreTrainedModel): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. - - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: - `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` + `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: - `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! diff --git a/src/transformers/models/convbert/modeling_tf_convbert.py b/src/transformers/models/convbert/modeling_tf_convbert.py index d9d76dd4e2761c..81e3b0b019c097 100644 --- a/src/transformers/models/convbert/modeling_tf_convbert.py +++ b/src/transformers/models/convbert/modeling_tf_convbert.py @@ -638,23 +638,28 @@ class TFConvBertPreTrainedModel(TFPreTrainedModel): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Args: diff --git a/src/transformers/models/convnext/modeling_tf_convnext.py b/src/transformers/models/convnext/modeling_tf_convnext.py index 0be2d291923812..680c036e538497 100644 --- a/src/transformers/models/convnext/modeling_tf_convnext.py +++ b/src/transformers/models/convnext/modeling_tf_convnext.py @@ -399,13 +399,27 @@ def serving(self, inputs): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. - - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + - having all inputs as a list, tuple or dict in the first positional argument. + + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: + + - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)` + - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: + `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])` + - a dictionary with one or several input Tensors associated to the input names given in the docstring: + `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})` + + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! diff --git a/src/transformers/models/ctrl/modeling_tf_ctrl.py b/src/transformers/models/ctrl/modeling_tf_ctrl.py index 45a09988e72bde..0cfcb44f65ef8a 100644 --- a/src/transformers/models/ctrl/modeling_tf_ctrl.py +++ b/src/transformers/models/ctrl/modeling_tf_ctrl.py @@ -418,23 +418,28 @@ class TFCTRLPreTrainedModel(TFPreTrainedModel): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Parameters: diff --git a/src/transformers/models/data2vec/modeling_tf_data2vec_vision.py b/src/transformers/models/data2vec/modeling_tf_data2vec_vision.py index 33e9921cc9a58c..363bbf3ff6daff 100644 --- a/src/transformers/models/data2vec/modeling_tf_data2vec_vision.py +++ b/src/transformers/models/data2vec/modeling_tf_data2vec_vision.py @@ -816,13 +816,27 @@ def serving(self, inputs): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. - - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + - having all inputs as a list, tuple or dict in the first positional argument. + + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: + + - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)` + - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: + `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])` + - a dictionary with one or several input Tensors associated to the input names given in the docstring: + `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})` + + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! diff --git a/src/transformers/models/deberta/modeling_tf_deberta.py b/src/transformers/models/deberta/modeling_tf_deberta.py index edb9b2b0855532..652389f3b54a29 100644 --- a/src/transformers/models/deberta/modeling_tf_deberta.py +++ b/src/transformers/models/deberta/modeling_tf_deberta.py @@ -1004,23 +1004,28 @@ class TFDebertaPreTrainedModel(TFPreTrainedModel): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Parameters: diff --git a/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py b/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py index fa9a202427e5e9..bb1d55692c485a 100644 --- a/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py +++ b/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py @@ -1097,23 +1097,28 @@ class TFDebertaV2PreTrainedModel(TFPreTrainedModel): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Parameters: diff --git a/src/transformers/models/distilbert/modeling_tf_distilbert.py b/src/transformers/models/distilbert/modeling_tf_distilbert.py index 737fc1e3c71af0..64c3338b648175 100644 --- a/src/transformers/models/distilbert/modeling_tf_distilbert.py +++ b/src/transformers/models/distilbert/modeling_tf_distilbert.py @@ -447,22 +447,27 @@ def serving(self, inputs): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: - `model([input_ids, attention_mask])` + `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: - `model({"input_ids": input_ids})` + `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! diff --git a/src/transformers/models/dpr/modeling_tf_dpr.py b/src/transformers/models/dpr/modeling_tf_dpr.py index 7955e067254cf6..96ee761b819d08 100644 --- a/src/transformers/models/dpr/modeling_tf_dpr.py +++ b/src/transformers/models/dpr/modeling_tf_dpr.py @@ -404,23 +404,28 @@ def serving(self, inputs): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Parameters: diff --git a/src/transformers/models/electra/modeling_tf_electra.py b/src/transformers/models/electra/modeling_tf_electra.py index 2ac72c2371d890..d2426cae9c23b3 100644 --- a/src/transformers/models/electra/modeling_tf_electra.py +++ b/src/transformers/models/electra/modeling_tf_electra.py @@ -841,23 +841,28 @@ class TFElectraForPreTrainingOutput(ModelOutput): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Parameters: diff --git a/src/transformers/models/flaubert/modeling_tf_flaubert.py b/src/transformers/models/flaubert/modeling_tf_flaubert.py index c74e8ded9ba423..d475c5774a6bc3 100644 --- a/src/transformers/models/flaubert/modeling_tf_flaubert.py +++ b/src/transformers/models/flaubert/modeling_tf_flaubert.py @@ -73,23 +73,28 @@ - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Parameters: diff --git a/src/transformers/models/funnel/modeling_tf_funnel.py b/src/transformers/models/funnel/modeling_tf_funnel.py index ba74871dd1d0dc..2bf59d0c3dbea3 100644 --- a/src/transformers/models/funnel/modeling_tf_funnel.py +++ b/src/transformers/models/funnel/modeling_tf_funnel.py @@ -1015,23 +1015,28 @@ class TFFunnelForPreTrainingOutput(ModelOutput): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Parameters: diff --git a/src/transformers/models/gpt2/modeling_tf_gpt2.py b/src/transformers/models/gpt2/modeling_tf_gpt2.py index 5581b700a3eb62..a876511fff3db2 100644 --- a/src/transformers/models/gpt2/modeling_tf_gpt2.py +++ b/src/transformers/models/gpt2/modeling_tf_gpt2.py @@ -607,23 +607,28 @@ class TFGPT2DoubleHeadsModelOutput(ModelOutput): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Parameters: diff --git a/src/transformers/models/gptj/modeling_tf_gptj.py b/src/transformers/models/gptj/modeling_tf_gptj.py index a1071408fb0dc1..943d9b1fff0410 100644 --- a/src/transformers/models/gptj/modeling_tf_gptj.py +++ b/src/transformers/models/gptj/modeling_tf_gptj.py @@ -550,23 +550,28 @@ def serving(self, inputs): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Parameters: diff --git a/src/transformers/models/hubert/modeling_tf_hubert.py b/src/transformers/models/hubert/modeling_tf_hubert.py index c33eb504594190..b3aab9a1e857e7 100644 --- a/src/transformers/models/hubert/modeling_tf_hubert.py +++ b/src/transformers/models/hubert/modeling_tf_hubert.py @@ -1347,23 +1347,28 @@ def serving(self, inputs): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_values` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_values` only and nothing else: `model(input_values)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_values, attention_mask])` or `model([input_values, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_values": input_values, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Args: diff --git a/src/transformers/models/layoutlm/modeling_tf_layoutlm.py b/src/transformers/models/layoutlm/modeling_tf_layoutlm.py index a166b1709b5643..26ae260dd17de0 100644 --- a/src/transformers/models/layoutlm/modeling_tf_layoutlm.py +++ b/src/transformers/models/layoutlm/modeling_tf_layoutlm.py @@ -817,23 +817,28 @@ class TFLayoutLMPreTrainedModel(TFPreTrainedModel): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Args: diff --git a/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py b/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py index 85a44e4ff52ae8..418ab2b1486097 100644 --- a/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py @@ -1019,13 +1019,27 @@ def serving(self, inputs): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. - - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + - having all inputs as a list, tuple or dict in the first positional argument. + + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: + + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` + - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: + `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` + - a dictionary with one or several input Tensors associated to the input names given in the docstring: + `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! diff --git a/src/transformers/models/led/modeling_tf_led.py b/src/transformers/models/led/modeling_tf_led.py index 2262d1ce8d6c5c..6435516fb573c2 100644 --- a/src/transformers/models/led/modeling_tf_led.py +++ b/src/transformers/models/led/modeling_tf_led.py @@ -1553,16 +1553,17 @@ class TFLEDSeq2SeqLMOutput(ModelOutput): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. - - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: @@ -1570,6 +1571,10 @@ class TFLEDSeq2SeqLMOutput(ModelOutput): - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Args: diff --git a/src/transformers/models/longformer/modeling_tf_longformer.py b/src/transformers/models/longformer/modeling_tf_longformer.py index 0dfd9c66617f29..eab0d8005476b4 100644 --- a/src/transformers/models/longformer/modeling_tf_longformer.py +++ b/src/transformers/models/longformer/modeling_tf_longformer.py @@ -1911,23 +1911,28 @@ def serving(self, inputs): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Parameters: diff --git a/src/transformers/models/lxmert/modeling_tf_lxmert.py b/src/transformers/models/lxmert/modeling_tf_lxmert.py index 08c4dedce50e88..88535aee12048c 100644 --- a/src/transformers/models/lxmert/modeling_tf_lxmert.py +++ b/src/transformers/models/lxmert/modeling_tf_lxmert.py @@ -829,23 +829,28 @@ def serving(self, inputs): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Parameters: diff --git a/src/transformers/models/marian/modeling_tf_marian.py b/src/transformers/models/marian/modeling_tf_marian.py index 2ceac449c1ccf3..27fceae6d9a7dd 100644 --- a/src/transformers/models/marian/modeling_tf_marian.py +++ b/src/transformers/models/marian/modeling_tf_marian.py @@ -553,16 +553,17 @@ def serving(self, inputs): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. - - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: @@ -570,6 +571,10 @@ def serving(self, inputs): - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Args: diff --git a/src/transformers/models/mbart/modeling_tf_mbart.py b/src/transformers/models/mbart/modeling_tf_mbart.py index 3f6a44fcf4d096..91cc3373283f18 100644 --- a/src/transformers/models/mbart/modeling_tf_mbart.py +++ b/src/transformers/models/mbart/modeling_tf_mbart.py @@ -519,16 +519,17 @@ def serving(self, inputs): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. - - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: @@ -536,6 +537,10 @@ def serving(self, inputs): - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Args: diff --git a/src/transformers/models/mobilebert/modeling_tf_mobilebert.py b/src/transformers/models/mobilebert/modeling_tf_mobilebert.py index 3a17f20202d84c..25fe9327460f45 100644 --- a/src/transformers/models/mobilebert/modeling_tf_mobilebert.py +++ b/src/transformers/models/mobilebert/modeling_tf_mobilebert.py @@ -867,23 +867,28 @@ class TFMobileBertForPreTrainingOutput(ModelOutput): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Parameters: diff --git a/src/transformers/models/mobilevit/modeling_tf_mobilevit.py b/src/transformers/models/mobilevit/modeling_tf_mobilevit.py index c54bd6554dc35d..41b9b313b0f4b8 100644 --- a/src/transformers/models/mobilevit/modeling_tf_mobilevit.py +++ b/src/transformers/models/mobilevit/modeling_tf_mobilevit.py @@ -778,13 +778,27 @@ def serving(self, inputs): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. - - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + - having all inputs as a list, tuple or dict in the first positional argument. + + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: + + - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)` + - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: + `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])` + - a dictionary with one or several input Tensors associated to the input names given in the docstring: + `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})` + + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! diff --git a/src/transformers/models/mpnet/modeling_tf_mpnet.py b/src/transformers/models/mpnet/modeling_tf_mpnet.py index 41432a6fb5e52f..7b63dff489043c 100644 --- a/src/transformers/models/mpnet/modeling_tf_mpnet.py +++ b/src/transformers/models/mpnet/modeling_tf_mpnet.py @@ -584,22 +584,27 @@ def call( - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensor in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: - `model([input_ids, attention_mask])` + `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: - `model({"input_ids": input_ids, "attention_mask": attention_mask})` + `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! diff --git a/src/transformers/models/openai/modeling_tf_openai.py b/src/transformers/models/openai/modeling_tf_openai.py index 8a176190862816..b6056233d5a8a8 100644 --- a/src/transformers/models/openai/modeling_tf_openai.py +++ b/src/transformers/models/openai/modeling_tf_openai.py @@ -411,23 +411,28 @@ class TFOpenAIGPTDoubleHeadsModelOutput(ModelOutput): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Parameters: diff --git a/src/transformers/models/opt/modeling_tf_opt.py b/src/transformers/models/opt/modeling_tf_opt.py index cfcd34ba1aff52..535a616db39e9a 100644 --- a/src/transformers/models/opt/modeling_tf_opt.py +++ b/src/transformers/models/opt/modeling_tf_opt.py @@ -378,16 +378,17 @@ def call( - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. - - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: @@ -395,6 +396,10 @@ def call( - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Args: diff --git a/src/transformers/models/pegasus/modeling_tf_pegasus.py b/src/transformers/models/pegasus/modeling_tf_pegasus.py index dbf822060d1686..35d59c6b125607 100644 --- a/src/transformers/models/pegasus/modeling_tf_pegasus.py +++ b/src/transformers/models/pegasus/modeling_tf_pegasus.py @@ -554,16 +554,17 @@ def serving(self, inputs): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. - - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: @@ -571,6 +572,10 @@ def serving(self, inputs): - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Args: diff --git a/src/transformers/models/rembert/modeling_tf_rembert.py b/src/transformers/models/rembert/modeling_tf_rembert.py index 2e25dafed48302..aea9fa325b9930 100644 --- a/src/transformers/models/rembert/modeling_tf_rembert.py +++ b/src/transformers/models/rembert/modeling_tf_rembert.py @@ -843,23 +843,28 @@ def dummy_inputs(self): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Args: diff --git a/src/transformers/models/roberta/modeling_tf_roberta.py b/src/transformers/models/roberta/modeling_tf_roberta.py index a320664bcea57f..c2e1477c7ee265 100644 --- a/src/transformers/models/roberta/modeling_tf_roberta.py +++ b/src/transformers/models/roberta/modeling_tf_roberta.py @@ -821,23 +821,28 @@ def serving(self, inputs): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Parameters: diff --git a/src/transformers/models/roformer/modeling_tf_roformer.py b/src/transformers/models/roformer/modeling_tf_roformer.py index 6bad9797733920..852b1424b406bb 100644 --- a/src/transformers/models/roformer/modeling_tf_roformer.py +++ b/src/transformers/models/roformer/modeling_tf_roformer.py @@ -711,23 +711,28 @@ class TFRoFormerPreTrainedModel(TFPreTrainedModel): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Args: diff --git a/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py b/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py index dd575575de6daa..b269b2cb85aedd 100755 --- a/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py +++ b/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py @@ -630,16 +630,17 @@ def serving(self, inputs): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. - - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: @@ -647,6 +648,10 @@ def serving(self, inputs): - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Parameters: diff --git a/src/transformers/models/t5/modeling_tf_t5.py b/src/transformers/models/t5/modeling_tf_t5.py index dc909c8d8f3349..b8a9e86ac99229 100644 --- a/src/transformers/models/t5/modeling_tf_t5.py +++ b/src/transformers/models/t5/modeling_tf_t5.py @@ -954,23 +954,28 @@ def _shift_right(self, input_ids): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Parameters: diff --git a/src/transformers/models/tapas/modeling_tf_tapas.py b/src/transformers/models/tapas/modeling_tf_tapas.py index 0e7539546d49dc..ea379a039d5a7e 100644 --- a/src/transformers/models/tapas/modeling_tf_tapas.py +++ b/src/transformers/models/tapas/modeling_tf_tapas.py @@ -888,23 +888,28 @@ def serving(self, inputs): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Parameters: diff --git a/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py b/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py index 66467350f14218..b0d26e6edf5191 100644 --- a/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py +++ b/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py @@ -806,23 +806,28 @@ class TFTransfoXLSequenceClassifierOutputWithPast(ModelOutput): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Parameters: diff --git a/src/transformers/models/vit/modeling_tf_vit.py b/src/transformers/models/vit/modeling_tf_vit.py index 1db9cf58032d0f..754a86ce2814ee 100644 --- a/src/transformers/models/vit/modeling_tf_vit.py +++ b/src/transformers/models/vit/modeling_tf_vit.py @@ -593,13 +593,27 @@ def serving(self, inputs): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. - - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + - having all inputs as a list, tuple or dict in the first positional argument. + + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: + + - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)` + - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: + `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])` + - a dictionary with one or several input Tensors associated to the input names given in the docstring: + `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})` + + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! diff --git a/src/transformers/models/vit_mae/modeling_tf_vit_mae.py b/src/transformers/models/vit_mae/modeling_tf_vit_mae.py index a5bf778c4c830f..3cb702b4f6a4bd 100644 --- a/src/transformers/models/vit_mae/modeling_tf_vit_mae.py +++ b/src/transformers/models/vit_mae/modeling_tf_vit_mae.py @@ -737,13 +737,27 @@ def serving(self, inputs): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. - - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + - having all inputs as a list, tuple or dict in the first positional argument. + + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: + + - a single Tensor with `pixel_values` only and nothing else: `model(pixel_values)` + - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: + `model([pixel_values, attention_mask])` or `model([pixel_values, attention_mask, token_type_ids])` + - a dictionary with one or several input Tensors associated to the input names given in the docstring: + `model({"pixel_values": pixel_values, "token_type_ids": token_type_ids})` + + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! diff --git a/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py b/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py index 854831e45a0956..9e80886ee55ca4 100644 --- a/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py +++ b/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py @@ -1380,23 +1380,28 @@ def serving(self, inputs): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_values` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_values` only and nothing else: `model(input_values)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_values, attention_mask])` or `model([input_values, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_values": input_values, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Args: diff --git a/src/transformers/models/xglm/modeling_tf_xglm.py b/src/transformers/models/xglm/modeling_tf_xglm.py index 1b6828b09817ad..7ed399ac29ae1d 100644 --- a/src/transformers/models/xglm/modeling_tf_xglm.py +++ b/src/transformers/models/xglm/modeling_tf_xglm.py @@ -672,16 +672,17 @@ def serving(self, inputs): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. - - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: @@ -689,6 +690,10 @@ def serving(self, inputs): - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Args: diff --git a/src/transformers/models/xlm/modeling_tf_xlm.py b/src/transformers/models/xlm/modeling_tf_xlm.py index 3b5f1c6e26500d..c472ecbeabf847 100644 --- a/src/transformers/models/xlm/modeling_tf_xlm.py +++ b/src/transformers/models/xlm/modeling_tf_xlm.py @@ -575,23 +575,28 @@ class TFXLMWithLMHeadModelOutput(ModelOutput): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Parameters: diff --git a/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py b/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py index 0edd4158fbc1df..d7bdd92fc98abf 100644 --- a/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py +++ b/src/transformers/models/xlm_roberta/modeling_tf_xlm_roberta.py @@ -47,23 +47,28 @@ - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Parameters: diff --git a/src/transformers/models/xlnet/modeling_tf_xlnet.py b/src/transformers/models/xlnet/modeling_tf_xlnet.py index 2e2fb1ea0875a3..739ad50ecb644f 100644 --- a/src/transformers/models/xlnet/modeling_tf_xlnet.py +++ b/src/transformers/models/xlnet/modeling_tf_xlnet.py @@ -1020,23 +1020,28 @@ class TFXLNetForQuestionAnsweringSimpleOutput(ModelOutput): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the - tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the - first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + Parameters: diff --git a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py index cd83f1f10b1abd..c92b80144c263c 100644 --- a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py +++ b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py @@ -834,23 +834,27 @@ def dummy_inputs(self): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having - all the tensors in the first argument of the model call function: `model(inputs)`. + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating + your own layers or models with the Keras `Functional` API, there are three possibilities you + can use to gather all the input Tensors in the first positional argument: - If you choose this second option, there are three possibilities you can use to gather all the input Tensors - in the first positional argument : - - - a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)` + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with (subclassing)[https://keras.io/guides/making_new_layers_and_models_via_subclassing/] + then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python + function! + Args: @@ -2101,16 +2105,16 @@ def serving(self, inputs): - TF 2.0 models accepts two formats as inputs: + TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - - having all inputs as a list, tuple or dict in the first positional arguments. + - having all inputs as a list, tuple or dict in the first positional argument. - This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all - the tensors in the first argument of the model call function: `model(inputs)`. - - If you choose this second option, there are three possibilities you can use to gather all the input Tensors in - the first positional argument : + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating + your own layers or models with the Keras `Functional` API, there are three possibilities you + can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: @@ -2118,6 +2122,10 @@ def serving(self, inputs): - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + Note that when creating models and layers with (subclassing)[https://keras.io/guides/making_new_layers_and_models_via_subclassing/] + then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python + function! + Args: From f7ceda345d5cdaea0f6f9afcfb5e04352fd0a7a5 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Mon, 12 Sep 2022 12:09:37 -0400 Subject: [PATCH 262/539] Align try_to_load_from_cache with huggingface_hub (#18966) * Align try_to_load_from_cache with huggingface_hub * Fix tests --- src/transformers/utils/hub.py | 63 ++++++++++++++++++++--------------- 1 file changed, 37 insertions(+), 26 deletions(-) diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py index 31c3257ffd3646..3e5863e4eff54c 100644 --- a/src/transformers/utils/hub.py +++ b/src/transformers/utils/hub.py @@ -222,18 +222,27 @@ def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None -def try_to_load_from_cache(cache_dir, repo_id, filename, revision=None, commit_hash=None): +def try_to_load_from_cache( + repo_id: str, + filename: str, + cache_dir: Union[str, Path, None] = None, + revision: Optional[str] = None, +) -> Optional[str]: """ - Explores the cache to return the latest cached file for a given revision. + Explores the cache to return the latest cached file for a given revision if found. + + This function will not raise any exception if the file in not cached. Args: - cache_dir (`str` or `os.PathLike`): The folder where the cached files lie. - repo_id (`str`): The ID of the repo on huggingface.co. - filename (`str`): The filename to look for inside `repo_id`. + cache_dir (`str` or `os.PathLike`): + The folder where the cached files lie. + repo_id (`str`): + The ID of the repo on huggingface.co. + filename (`str`): + The filename to look for inside `repo_id`. revision (`str`, *optional*): The specific model version to use. Will default to `"main"` if it's not provided and no `commit_hash` is provided either. - commit_hash (`str`, *optional*): The (full) commit hash to look for inside the cache. Returns: `Optional[str]` or `_CACHED_NO_EXIST`: @@ -242,36 +251,36 @@ def try_to_load_from_cache(cache_dir, repo_id, filename, revision=None, commit_h - A special value `_CACHED_NO_EXIST` if the file does not exist at the given commit hash and this fact was cached. """ - if commit_hash is not None and revision is not None: - raise ValueError("`commit_hash` and `revision` are mutually exclusive, pick one only.") - if revision is None and commit_hash is None: + if revision is None: revision = "main" - model_id = repo_id.replace("/", "--") - model_cache = os.path.join(cache_dir, f"models--{model_id}") - if not os.path.isdir(model_cache): + if cache_dir is None: + cache_dir = TRANSFORMERS_CACHE + + object_id = repo_id.replace("/", "--") + repo_cache = os.path.join(cache_dir, f"models--{object_id}") + if not os.path.isdir(repo_cache): # No cache for this model return None for subfolder in ["refs", "snapshots"]: - if not os.path.isdir(os.path.join(model_cache, subfolder)): + if not os.path.isdir(os.path.join(repo_cache, subfolder)): return None - if commit_hash is None: - # Resolve refs (for instance to convert main to the associated commit sha) - cached_refs = os.listdir(os.path.join(model_cache, "refs")) - if revision in cached_refs: - with open(os.path.join(model_cache, "refs", revision)) as f: - commit_hash = f.read() + # Resolve refs (for instance to convert main to the associated commit sha) + cached_refs = os.listdir(os.path.join(repo_cache, "refs")) + if revision in cached_refs: + with open(os.path.join(repo_cache, "refs", revision)) as f: + revision = f.read() - if os.path.isfile(os.path.join(model_cache, ".no_exist", commit_hash, filename)): + if os.path.isfile(os.path.join(repo_cache, ".no_exist", revision, filename)): return _CACHED_NO_EXIST - cached_shas = os.listdir(os.path.join(model_cache, "snapshots")) - if commit_hash not in cached_shas: + cached_shas = os.listdir(os.path.join(repo_cache, "snapshots")) + if revision not in cached_shas: # No cache for this revision and we won't try to return a random revision return None - cached_file = os.path.join(model_cache, "snapshots", commit_hash, filename) + cached_file = os.path.join(repo_cache, "snapshots", revision, filename) return cached_file if os.path.isfile(cached_file) else None @@ -375,7 +384,9 @@ def cached_file( if _commit_hash is not None: # If the file is cached under that commit hash, we return it directly. - resolved_file = try_to_load_from_cache(cache_dir, path_or_repo_id, full_filename, commit_hash=_commit_hash) + resolved_file = try_to_load_from_cache( + path_or_repo_id, full_filename, cache_dir=cache_dir, revision=_commit_hash + ) if resolved_file is not None: if resolved_file is not _CACHED_NO_EXIST: return resolved_file @@ -416,7 +427,7 @@ def cached_file( ) except LocalEntryNotFoundError: # We try to see if we have a cached version (not up to date): - resolved_file = try_to_load_from_cache(cache_dir, path_or_repo_id, full_filename, revision=revision) + resolved_file = try_to_load_from_cache(path_or_repo_id, full_filename, cache_dir=cache_dir, revision=revision) if resolved_file is not None: return resolved_file if not _raise_exceptions_for_missing_entries or not _raise_exceptions_for_connection_errors: @@ -438,7 +449,7 @@ def cached_file( ) except HTTPError as err: # First we try to see if we have a cached version (not up to date): - resolved_file = try_to_load_from_cache(cache_dir, path_or_repo_id, full_filename, revision=revision) + resolved_file = try_to_load_from_cache(path_or_repo_id, full_filename, cache_dir=cache_dir, revision=revision) if resolved_file is not None: return resolved_file if not _raise_exceptions_for_connection_errors: From c126a239bcea9c68453cf86045a5177afbe2be6c Mon Sep 17 00:00:00 2001 From: Matt Date: Mon, 12 Sep 2022 17:51:10 +0100 Subject: [PATCH 263/539] Fix tflongformer int dtype (#18907) * Use int64 throughout TFLongFormer * make style * Do some more fixed casting in TFLongFormer * Fix some wonky "is None" conditionals * Cast all the dtypes, salt the earth * Fix copies to TFLED as well and do some casting there * dtype fix in TFLongformer test * Make fixup * Expand tolerances on the LED tests too (I think this is a TF32 thing) * Expand test tolerances for LED a tiny bit (probably a Tensorfloat thing again) --- .../models/led/modeling_tf_led.py | 16 +-- .../longformer/modeling_tf_longformer.py | 95 ++++++++++++---- tests/models/led/test_modeling_tf_led.py | 4 +- .../longformer/test_modeling_tf_longformer.py | 106 +++++++++--------- 4 files changed, 137 insertions(+), 84 deletions(-) diff --git a/src/transformers/models/led/modeling_tf_led.py b/src/transformers/models/led/modeling_tf_led.py index 6435516fb573c2..c677581635f93b 100644 --- a/src/transformers/models/led/modeling_tf_led.py +++ b/src/transformers/models/led/modeling_tf_led.py @@ -472,7 +472,7 @@ def _sliding_chunks_query_key_matmul(self, query, key, window_overlap): ) first_chunk_mask = ( tf.tile( - tf.range(chunks_count + 1)[None, :, None, None], + tf.range(chunks_count + 1, dtype=tf.int64)[None, :, None, None], (batch_size * num_heads, 1, window_overlap, window_overlap), ) < 1 @@ -1335,10 +1335,10 @@ class TFLEDPreTrainedModel(TFPreTrainedModel): @property def dummy_inputs(self): - input_ids = tf.convert_to_tensor([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0]]) + input_ids = tf.convert_to_tensor([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0]], dtype=tf.int64) # make sure global layers are initialized - attention_mask = tf.convert_to_tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0]]) - global_attention_mask = tf.convert_to_tensor([[0, 0, 0, 0, 1], [0, 0, 1, 0, 0]]) + attention_mask = tf.convert_to_tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0]], dtype=tf.int64) + global_attention_mask = tf.convert_to_tensor([[0, 0, 0, 0, 1], [0, 0, 1, 0, 0]], dtype=tf.int64) dummy_inputs = { "input_ids": input_ids, "attention_mask": attention_mask, @@ -1350,10 +1350,10 @@ def dummy_inputs(self): @tf.function( input_signature=[ { - "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), - "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), - "decoder_input_ids": tf.TensorSpec((None, None), tf.int32, name="decoder_input_ids"), - "decoder_attention_mask": tf.TensorSpec((None, None), tf.int32, name="decoder_attention_mask"), + "input_ids": tf.TensorSpec((None, None), tf.int64, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None), tf.int64, name="attention_mask"), + "decoder_input_ids": tf.TensorSpec((None, None), tf.int64, name="decoder_input_ids"), + "decoder_attention_mask": tf.TensorSpec((None, None), tf.int64, name="decoder_attention_mask"), } ] ) diff --git a/src/transformers/models/longformer/modeling_tf_longformer.py b/src/transformers/models/longformer/modeling_tf_longformer.py index eab0d8005476b4..6b491638cc5f78 100644 --- a/src/transformers/models/longformer/modeling_tf_longformer.py +++ b/src/transformers/models/longformer/modeling_tf_longformer.py @@ -395,11 +395,10 @@ def _compute_global_attention_mask(input_ids_shape, sep_token_indices, before_se Computes global attention mask by putting attention on all tokens before `sep_token_id` if `before_sep_token is True` else after `sep_token_id`. """ - assert shape_list(sep_token_indices)[1] == 2, "`input_ids` should have two dimensions" question_end_index = tf.reshape(sep_token_indices, (input_ids_shape[0], 3, 2))[:, 0, 1][:, None] # bool attention mask with True in locations of global attention - attention_mask = tf.expand_dims(tf.range(input_ids_shape[1]), axis=0) + attention_mask = tf.expand_dims(tf.range(input_ids_shape[1], dtype=tf.int64), axis=0) attention_mask = tf.tile(attention_mask, (input_ids_shape[0], 1)) if before_sep_token is True: question_end_index = tf.tile(question_end_index, (1, input_ids_shape[1])) @@ -468,10 +467,9 @@ def call(self, hidden_states): return hidden_states -# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaEmbeddings with Roberta->Longformer class TFLongformerEmbeddings(tf.keras.layers.Layer): """ - Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. + Same as BertEmbeddings with a tiny tweak for positional embeddings indexing and some extra casting. """ def __init__(self, config, **kwargs): @@ -547,7 +545,7 @@ def call( input_shape = shape_list(inputs_embeds)[:-1] if token_type_ids is None: - token_type_ids = tf.fill(dims=input_shape, value=0) + token_type_ids = tf.cast(tf.fill(dims=input_shape, value=0), tf.int64) if position_ids is None: if input_ids is not None: @@ -557,7 +555,8 @@ def call( ) else: position_ids = tf.expand_dims( - tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1), axis=0 + tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1, dtype=tf.int64), + axis=0, ) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) @@ -998,7 +997,7 @@ def _sliding_chunks_query_key_matmul(self, query, key, window_overlap): ) first_chunk_mask = ( tf.tile( - tf.range(chunks_count + 1)[None, :, None, None], + tf.range(chunks_count + 1, dtype=tf.int64)[None, :, None, None], (batch_size * num_heads, 1, window_overlap, window_overlap), ) < 1 @@ -1701,6 +1700,21 @@ def call( training=False, ): + if input_ids is not None and not isinstance(input_ids, tf.Tensor): + input_ids = tf.convert_to_tensor(input_ids, dtype=tf.int64) + elif input_ids is not None: + input_ids = tf.cast(input_ids, tf.int64) + + if attention_mask is not None and not isinstance(attention_mask, tf.Tensor): + attention_mask = tf.convert_to_tensor(attention_mask, dtype=tf.int64) + elif attention_mask is not None: + attention_mask = tf.cast(attention_mask, tf.int64) + + if global_attention_mask is not None and not isinstance(global_attention_mask, tf.Tensor): + global_attention_mask = tf.convert_to_tensor(global_attention_mask, dtype=tf.int64) + elif global_attention_mask is not None: + global_attention_mask = tf.cast(global_attention_mask, tf.int64) + if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: @@ -1711,10 +1725,10 @@ def call( raise ValueError("You have to specify either input_ids or inputs_embeds") if attention_mask is None: - attention_mask = tf.fill(input_shape, 1) + attention_mask = tf.cast(tf.fill(input_shape, 1), tf.int64) if token_type_ids is None: - token_type_ids = tf.fill(input_shape, 0) + token_type_ids = tf.cast(tf.fill(input_shape, 0), tf.int64) # merge `global_attention_mask` and `attention_mask` if global_attention_mask is not None: @@ -1831,7 +1845,7 @@ def _pad_to_window_size( if inputs_embeds is not None: def pad_embeddings(): - input_ids_padding = tf.fill((batch_size, padding_len), self.pad_token_id) + input_ids_padding = tf.cast(tf.fill((batch_size, padding_len), self.pad_token_id), tf.int64) inputs_embeds_padding = self.embeddings(input_ids_padding) return tf.concat([inputs_embeds, inputs_embeds_padding], axis=-2) @@ -1875,10 +1889,15 @@ class TFLongformerPreTrainedModel(TFPreTrainedModel): @property def dummy_inputs(self): - input_ids = tf.convert_to_tensor([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]) + input_ids = tf.convert_to_tensor([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]], dtype=tf.int64) # make sure global layers are initialized - attention_mask = tf.convert_to_tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]]) - global_attention_mask = tf.convert_to_tensor([[0, 0, 0, 0, 1], [0, 0, 1, 0, 0], [0, 0, 0, 0, 1]]) + attention_mask = tf.convert_to_tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]], dtype=tf.int64) + global_attention_mask = tf.convert_to_tensor( + [[0, 0, 0, 0, 1], [0, 0, 1, 0, 0], [0, 0, 0, 0, 1]], dtype=tf.int64 + ) + global_attention_mask = tf.convert_to_tensor( + [[0, 0, 0, 0, 1], [0, 0, 1, 0, 0], [0, 0, 0, 0, 1]], dtype=tf.int64 + ) return { "input_ids": input_ids, "attention_mask": attention_mask, @@ -1888,8 +1907,8 @@ def dummy_inputs(self): @tf.function( input_signature=[ { - "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), - "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), + "input_ids": tf.TensorSpec((None, None), tf.int64, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None), tf.int64, name="attention_mask"), } ] ) @@ -2235,6 +2254,21 @@ def call( are not taken into account for computing the loss. """ + if input_ids is not None and not isinstance(input_ids, tf.Tensor): + input_ids = tf.convert_to_tensor(input_ids, dtype=tf.int64) + elif input_ids is not None: + input_ids = tf.cast(input_ids, tf.int64) + + if attention_mask is not None and not isinstance(attention_mask, tf.Tensor): + attention_mask = tf.convert_to_tensor(attention_mask, dtype=tf.int64) + elif attention_mask is not None: + attention_mask = tf.cast(attention_mask, tf.int64) + + if global_attention_mask is not None and not isinstance(global_attention_mask, tf.Tensor): + global_attention_mask = tf.convert_to_tensor(global_attention_mask, dtype=tf.int64) + elif global_attention_mask is not None: + global_attention_mask = tf.cast(global_attention_mask, tf.int64) + # set global attention on question tokens if global_attention_mask is None and input_ids is not None: if shape_list(tf.where(input_ids == self.config.sep_token_id))[0] != 3 * shape_list(input_ids)[0]: @@ -2244,12 +2278,12 @@ def call( " forward function to avoid this. This is most likely an error. The global attention is disabled" " for this forward pass." ) - global_attention_mask = tf.fill(shape_list(input_ids), value=0) + global_attention_mask = tf.cast(tf.fill(shape_list(input_ids), value=0), tf.int64) else: logger.info("Initializing global attention on question tokens...") # put global attention on all tokens until `config.sep_token_id` is reached sep_token_indices = tf.where(input_ids == self.config.sep_token_id) - sep_token_indices = tf.cast(sep_token_indices, dtype=input_ids.dtype) + sep_token_indices = tf.cast(sep_token_indices, dtype=tf.int64) global_attention_mask = _compute_global_attention_mask(shape_list(input_ids), sep_token_indices) outputs = self.longformer( @@ -2375,13 +2409,28 @@ def call( training: Optional[bool] = False, ) -> Union[TFLongformerSequenceClassifierOutput, Tuple[tf.Tensor]]: + if input_ids is not None and not isinstance(input_ids, tf.Tensor): + input_ids = tf.convert_to_tensor(input_ids, dtype=tf.int64) + elif input_ids is not None: + input_ids = tf.cast(input_ids, tf.int64) + + if attention_mask is not None and not isinstance(attention_mask, tf.Tensor): + attention_mask = tf.convert_to_tensor(attention_mask, dtype=tf.int64) + elif attention_mask is not None: + attention_mask = tf.cast(attention_mask, tf.int64) + + if global_attention_mask is not None and not isinstance(global_attention_mask, tf.Tensor): + global_attention_mask = tf.convert_to_tensor(global_attention_mask, dtype=tf.int64) + elif global_attention_mask is not None: + global_attention_mask = tf.cast(global_attention_mask, tf.int64) + if global_attention_mask is None and input_ids is not None: logger.info("Initializing global attention on CLS token...") # global attention on cls token global_attention_mask = tf.zeros_like(input_ids) - updates = tf.ones(shape_list(input_ids)[0], dtype=tf.int32) + updates = tf.ones(shape_list(input_ids)[0], dtype=tf.int64) indices = tf.pad( - tensor=tf.expand_dims(tf.range(shape_list(input_ids)[0]), axis=1), + tensor=tf.expand_dims(tf.range(shape_list(input_ids)[0], dtype=tf.int64), axis=1), paddings=[[0, 0], [0, 1]], constant_values=0, ) @@ -2453,9 +2502,9 @@ def __init__(self, config, *inputs, **kwargs): @property def dummy_inputs(self): - input_ids = tf.convert_to_tensor(MULTIPLE_CHOICE_DUMMY_INPUTS) + input_ids = tf.convert_to_tensor(MULTIPLE_CHOICE_DUMMY_INPUTS, dtype=tf.int64) # make sure global layers are initialized - global_attention_mask = tf.convert_to_tensor([[[0, 0, 0, 1], [0, 0, 0, 1]]] * 2) + global_attention_mask = tf.convert_to_tensor([[[0, 0, 0, 1], [0, 0, 0, 1]]] * 2, dtype=tf.int64) return {"input_ids": input_ids, "global_attention_mask": global_attention_mask} @unpack_inputs @@ -2547,8 +2596,8 @@ def call( @tf.function( input_signature=[ { - "input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"), - "attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"), + "input_ids": tf.TensorSpec((None, None, None), tf.int64, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None, None), tf.int64, name="attention_mask"), } ] ) diff --git a/tests/models/led/test_modeling_tf_led.py b/tests/models/led/test_modeling_tf_led.py index dfdb66606fafbc..32ce09aaa0a905 100644 --- a/tests/models/led/test_modeling_tf_led.py +++ b/tests/models/led/test_modeling_tf_led.py @@ -412,7 +412,7 @@ def test_inference_no_head(self): expected_slice = tf.convert_to_tensor( [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]], ) - tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=TOLERANCE) + tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3) def test_inference_with_head(self): model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384") @@ -428,4 +428,4 @@ def test_inference_with_head(self): expected_slice = tf.convert_to_tensor( [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]], ) - tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=TOLERANCE) + tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3) diff --git a/tests/models/longformer/test_modeling_tf_longformer.py b/tests/models/longformer/test_modeling_tf_longformer.py index cc62bb6caf7096..60a8ce01f4af45 100644 --- a/tests/models/longformer/test_modeling_tf_longformer.py +++ b/tests/models/longformer/test_modeling_tf_longformer.py @@ -115,7 +115,7 @@ def create_and_check_attention_mask_determinism( ): model = TFLongformerModel(config=config) - attention_mask = tf.ones(input_ids.shape, dtype=tf.dtypes.int32) + attention_mask = tf.ones(input_ids.shape, dtype=tf.int64) output_with_mask = model(input_ids, attention_mask=attention_mask)[0] output_without_mask = model(input_ids)[0] tf.debugging.assert_near(output_with_mask[0, 0, :5], output_without_mask[0, 0, :5], rtol=1e-4) @@ -403,26 +403,24 @@ def test_diagonalize(self): # first row => [0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000] tf.debugging.assert_near(padded_hidden_states[0, 0, 0, :4], chunked_hidden_states[0, 0, 0], rtol=1e-3) - tf.debugging.assert_near(padded_hidden_states[0, 0, 0, 4:], tf.zeros((3,), dtype=tf.dtypes.float32), rtol=1e-3) + tf.debugging.assert_near(padded_hidden_states[0, 0, 0, 4:], tf.zeros((3,), dtype=tf.float32), rtol=1e-3) # last row => [0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629] tf.debugging.assert_near(padded_hidden_states[0, 0, -1, 3:], chunked_hidden_states[0, 0, -1], rtol=1e-3) - tf.debugging.assert_near( - padded_hidden_states[0, 0, -1, :3], tf.zeros((3,), dtype=tf.dtypes.float32), rtol=1e-3 - ) + tf.debugging.assert_near(padded_hidden_states[0, 0, -1, :3], tf.zeros((3,), dtype=tf.float32), rtol=1e-3) def test_pad_and_transpose_last_two_dims(self): hidden_states = self._get_hidden_states() self.assertEqual(shape_list(hidden_states), [1, 4, 8]) # pad along seq length dim - paddings = tf.constant([[0, 0], [0, 0], [0, 1], [0, 0]], dtype=tf.dtypes.int32) + paddings = tf.constant([[0, 0], [0, 0], [0, 1], [0, 0]], dtype=tf.int64) hidden_states = TFLongformerSelfAttention._chunk(hidden_states, window_overlap=2) padded_hidden_states = TFLongformerSelfAttention._pad_and_transpose_last_two_dims(hidden_states, paddings) self.assertTrue(shape_list(padded_hidden_states) == [1, 1, 8, 5]) - expected_added_dim = tf.zeros((5,), dtype=tf.dtypes.float32) + expected_added_dim = tf.zeros((5,), dtype=tf.float32) tf.debugging.assert_near(expected_added_dim, padded_hidden_states[0, 0, -1, :], rtol=1e-6) tf.debugging.assert_near( hidden_states[0, 0, -1, :], tf.reshape(padded_hidden_states, (1, -1))[0, 24:32], rtol=1e-6 @@ -441,10 +439,10 @@ def test_mask_invalid_locations(self): hid_states_3 = TFLongformerSelfAttention._mask_invalid_locations(hidden_states[:, :, :, :3], 2) hid_states_4 = TFLongformerSelfAttention._mask_invalid_locations(hidden_states[:, :, 2:, :], 2) - self.assertTrue(tf.math.reduce_sum(tf.cast(tf.math.is_inf(hid_states_1), tf.dtypes.int32)) == 8) - self.assertTrue(tf.math.reduce_sum(tf.cast(tf.math.is_inf(hid_states_2), tf.dtypes.int32)) == 24) - self.assertTrue(tf.math.reduce_sum(tf.cast(tf.math.is_inf(hid_states_3), tf.dtypes.int32)) == 24) - self.assertTrue(tf.math.reduce_sum(tf.cast(tf.math.is_inf(hid_states_4), tf.dtypes.int32)) == 12) + self.assertTrue(tf.math.reduce_sum(tf.cast(tf.math.is_inf(hid_states_1), tf.int64)) == 8) + self.assertTrue(tf.math.reduce_sum(tf.cast(tf.math.is_inf(hid_states_2), tf.int64)) == 24) + self.assertTrue(tf.math.reduce_sum(tf.cast(tf.math.is_inf(hid_states_3), tf.int64)) == 24) + self.assertTrue(tf.math.reduce_sum(tf.cast(tf.math.is_inf(hid_states_4), tf.int64)) == 12) def test_chunk(self): hidden_states = self._get_hidden_states() @@ -456,12 +454,14 @@ def test_chunk(self): chunked_hidden_states = TFLongformerSelfAttention._chunk(hidden_states, window_overlap=2) # expected slices across chunk and seq length dim - expected_slice_along_seq_length = tf.convert_to_tensor([0.4983, -0.7584, -1.6944], dtype=tf.dtypes.float32) - expected_slice_along_chunk = tf.convert_to_tensor([0.4983, -1.8348, -0.7584, 2.0514], dtype=tf.dtypes.float32) + expected_slice_along_seq_length = tf.convert_to_tensor([0.4983, -0.7584, -1.6944], dtype=tf.float32) + expected_slice_along_chunk = tf.convert_to_tensor([0.4983, -1.8348, -0.7584, 2.0514], dtype=tf.float32) self.assertTrue(shape_list(chunked_hidden_states) == [1, 3, 4, 4]) - tf.debugging.assert_near(chunked_hidden_states[0, :, 0, 0], expected_slice_along_seq_length, rtol=1e-3) - tf.debugging.assert_near(chunked_hidden_states[0, 0, :, 0], expected_slice_along_chunk, rtol=1e-3) + tf.debugging.assert_near( + chunked_hidden_states[0, :, 0, 0], expected_slice_along_seq_length, rtol=1e-3, atol=1e-4 + ) + tf.debugging.assert_near(chunked_hidden_states[0, 0, :, 0], expected_slice_along_chunk, rtol=1e-3, atol=1e-4) def test_layer_local_attn(self): model = TFLongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny") @@ -469,7 +469,7 @@ def test_layer_local_attn(self): hidden_states = self._get_hidden_states() batch_size, seq_length, hidden_size = hidden_states.shape - attention_mask = tf.zeros((batch_size, seq_length), dtype=tf.dtypes.float32) + attention_mask = tf.zeros((batch_size, seq_length), dtype=tf.float32) is_index_global_attn = tf.math.greater(attention_mask, 1) is_global_attn = tf.math.reduce_any(is_index_global_attn) @@ -483,11 +483,11 @@ def test_layer_local_attn(self): )[0] expected_slice = tf.convert_to_tensor( - [0.00188, 0.012196, -0.017051, -0.025571, -0.02996, 0.017297, -0.011521, 0.004848], dtype=tf.dtypes.float32 + [0.00188, 0.012196, -0.017051, -0.025571, -0.02996, 0.017297, -0.011521, 0.004848], dtype=tf.float32 ) self.assertEqual(output_hidden_states.shape, (1, 4, 8)) - tf.debugging.assert_near(output_hidden_states[0, 1], expected_slice, rtol=1e-3) + tf.debugging.assert_near(output_hidden_states[0, 1], expected_slice, rtol=1e-3, atol=1e-4) def test_layer_global_attn(self): model = TFLongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny") @@ -498,8 +498,8 @@ def test_layer_global_attn(self): batch_size, seq_length, hidden_size = hidden_states.shape # create attn mask - attention_mask_1 = tf.zeros((1, 1, 1, seq_length), dtype=tf.dtypes.float32) - attention_mask_2 = tf.zeros((1, 1, 1, seq_length), dtype=tf.dtypes.float32) + attention_mask_1 = tf.zeros((1, 1, 1, seq_length), dtype=tf.float32) + attention_mask_2 = tf.zeros((1, 1, 1, seq_length), dtype=tf.float32) attention_mask_1 = tf.where(tf.range(4)[None, :, None, None] > 1, 10000.0, attention_mask_1) attention_mask_1 = tf.where(tf.range(4)[None, :, None, None] > 2, -10000.0, attention_mask_1) @@ -525,15 +525,15 @@ def test_layer_global_attn(self): self.assertEqual(output_hidden_states.shape, (2, 4, 8)) expected_slice_0 = tf.convert_to_tensor( - [-0.06508, -0.039306, 0.030934, -0.03417, -0.00656, -0.01553, -0.02088, -0.04938], dtype=tf.dtypes.float32 + [-0.06508, -0.039306, 0.030934, -0.03417, -0.00656, -0.01553, -0.02088, -0.04938], dtype=tf.float32 ) expected_slice_1 = tf.convert_to_tensor( - [-0.04055, -0.038399, 0.0396, -0.03735, -0.03415, 0.01357, 0.00145, -0.05709], dtype=tf.dtypes.float32 + [-0.04055, -0.038399, 0.0396, -0.03735, -0.03415, 0.01357, 0.00145, -0.05709], dtype=tf.float32 ) - tf.debugging.assert_near(output_hidden_states[0, 2], expected_slice_0, rtol=1e-3) - tf.debugging.assert_near(output_hidden_states[1, -2], expected_slice_1, rtol=1e-3) + tf.debugging.assert_near(output_hidden_states[0, 2], expected_slice_0, rtol=1e-3, atol=1e-4) + tf.debugging.assert_near(output_hidden_states[1, -2], expected_slice_1, rtol=1e-3, atol=1e-4) def test_layer_attn_probs(self): model = TFLongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny") @@ -542,8 +542,8 @@ def test_layer_attn_probs(self): batch_size, seq_length, hidden_size = hidden_states.shape # create attn mask - attention_mask_1 = tf.zeros((1, 1, 1, seq_length), dtype=tf.dtypes.float32) - attention_mask_2 = tf.zeros((1, 1, 1, seq_length), dtype=tf.dtypes.float32) + attention_mask_1 = tf.zeros((1, 1, 1, seq_length), dtype=tf.float32) + attention_mask_2 = tf.zeros((1, 1, 1, seq_length), dtype=tf.float32) attention_mask_1 = tf.where(tf.range(4)[None, :, None, None] > 1, 10000.0, attention_mask_1) attention_mask_1 = tf.where(tf.range(4)[None, :, None, None] > 2, -10000.0, attention_mask_1) @@ -584,18 +584,16 @@ def test_layer_attn_probs(self): tf.debugging.assert_near( local_attentions[0, 0, 0, :], - tf.convert_to_tensor( - [0.3328, 0.0000, 0.0000, 0.0000, 0.0000, 0.3355, 0.3318, 0.0000], dtype=tf.dtypes.float32 - ), + tf.convert_to_tensor([0.3328, 0.0000, 0.0000, 0.0000, 0.0000, 0.3355, 0.3318, 0.0000], dtype=tf.float32), rtol=1e-3, + atol=1e-4, ) tf.debugging.assert_near( local_attentions[1, 0, 0, :], - tf.convert_to_tensor( - [0.2492, 0.2502, 0.2502, 0.0000, 0.0000, 0.2505, 0.0000, 0.0000], dtype=tf.dtypes.float32 - ), + tf.convert_to_tensor([0.2492, 0.2502, 0.2502, 0.0000, 0.0000, 0.2505, 0.0000, 0.0000], dtype=tf.float32), rtol=1e-3, + atol=1e-4, ) # All the global attention weights must sum to 1. @@ -603,13 +601,15 @@ def test_layer_attn_probs(self): tf.debugging.assert_near( global_attentions[0, 0, 1, :], - tf.convert_to_tensor([0.2500, 0.2500, 0.2500, 0.2500], dtype=tf.dtypes.float32), + tf.convert_to_tensor([0.2500, 0.2500, 0.2500, 0.2500], dtype=tf.float32), rtol=1e-3, + atol=1e-4, ) tf.debugging.assert_near( global_attentions[1, 0, 0, :], - tf.convert_to_tensor([0.2497, 0.2500, 0.2499, 0.2504], dtype=tf.dtypes.float32), + tf.convert_to_tensor([0.2497, 0.2500, 0.2499, 0.2504], dtype=tf.float32), rtol=1e-3, + atol=1e-4, ) @slow @@ -617,31 +617,31 @@ def test_inference_no_head(self): model = TFLongformerModel.from_pretrained("allenai/longformer-base-4096") # 'Hello world!' - input_ids = tf.convert_to_tensor([[0, 20920, 232, 328, 1437, 2]], dtype=tf.dtypes.int32) - attention_mask = tf.ones(shape_list(input_ids), dtype=tf.dtypes.int32) + input_ids = tf.convert_to_tensor([[0, 20920, 232, 328, 1437, 2]], dtype=tf.int64) + attention_mask = tf.ones(shape_list(input_ids), dtype=tf.int64) output = model(input_ids, attention_mask=attention_mask)[0] output_without_mask = model(input_ids)[0] - expected_output_slice = tf.convert_to_tensor( - [0.0549, 0.1087, -0.1119, -0.0368, 0.0250], dtype=tf.dtypes.float32 - ) + expected_output_slice = tf.convert_to_tensor([0.0549, 0.1087, -0.1119, -0.0368, 0.0250], dtype=tf.float32) - tf.debugging.assert_near(output[0, 0, -5:], expected_output_slice, rtol=1e-3) - tf.debugging.assert_near(output_without_mask[0, 0, -5:], expected_output_slice, rtol=1e-3) + tf.debugging.assert_near(output[0, 0, -5:], expected_output_slice, rtol=1e-3, atol=1e-4) + tf.debugging.assert_near(output_without_mask[0, 0, -5:], expected_output_slice, rtol=1e-3, atol=1e-4) @slow def test_inference_no_head_long(self): model = TFLongformerModel.from_pretrained("allenai/longformer-base-4096") # 'Hello world! ' repeated 1000 times - input_ids = tf.convert_to_tensor([[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=tf.dtypes.int32) + input_ids = tf.convert_to_tensor([[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=tf.int64) - attention_mask = tf.ones(shape_list(input_ids), dtype=tf.dtypes.int32) - global_attention_mask = tf.zeros(shape_list(input_ids), dtype=tf.dtypes.int32) + attention_mask = tf.ones(shape_list(input_ids), dtype=tf.int64) + global_attention_mask = tf.zeros(shape_list(input_ids), dtype=tf.int64) # Set global attention on a few random positions global_attention_mask = tf.tensor_scatter_nd_update( - global_attention_mask, tf.constant([[0, 1], [0, 4], [0, 21]]), tf.constant([1, 1, 1]) + global_attention_mask, + tf.constant([[0, 1], [0, 4], [0, 21]], dtype=tf.int64), + tf.constant([1, 1, 1], dtype=tf.int64), ) output = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask)[0] @@ -650,15 +650,15 @@ def test_inference_no_head_long(self): expected_output_mean = tf.constant(0.024267) # assert close - tf.debugging.assert_near(tf.reduce_sum(output), expected_output_sum, rtol=1e-4) - tf.debugging.assert_near(tf.reduce_mean(output), expected_output_mean, rtol=1e-4) + tf.debugging.assert_near(tf.reduce_sum(output), expected_output_sum, rtol=1e-4, atol=1e-4) + tf.debugging.assert_near(tf.reduce_mean(output), expected_output_mean, rtol=1e-4, atol=1e-4) @slow def test_inference_masked_lm_long(self): model = TFLongformerForMaskedLM.from_pretrained("allenai/longformer-base-4096") # 'Hello world! ' repeated 1000 times - input_ids = tf.convert_to_tensor([[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=tf.dtypes.int32) + input_ids = tf.convert_to_tensor([[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=tf.int64) output = model(input_ids, labels=input_ids) loss = output.loss @@ -669,9 +669,13 @@ def test_inference_masked_lm_long(self): expected_prediction_scores_mean = tf.constant(-3.03477) # assert close - tf.debugging.assert_near(tf.reduce_mean(loss), expected_loss, rtol=1e-4) - tf.debugging.assert_near(tf.reduce_sum(prediction_scores), expected_prediction_scores_sum, rtol=1e-4) - tf.debugging.assert_near(tf.reduce_mean(prediction_scores), expected_prediction_scores_mean, rtol=1e-4) + tf.debugging.assert_near(tf.reduce_mean(loss), expected_loss, rtol=1e-4, atol=1e-4) + tf.debugging.assert_near( + tf.reduce_sum(prediction_scores), expected_prediction_scores_sum, rtol=1e-4, atol=1e-4 + ) + tf.debugging.assert_near( + tf.reduce_mean(prediction_scores), expected_prediction_scores_mean, rtol=1e-4, atol=1e-4 + ) @slow def test_inference_masked_lm(self): From 8a6928e28b3230f0bc640fa55dc498fb035983d1 Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Mon, 12 Sep 2022 18:35:45 +0100 Subject: [PATCH 264/539] TF: correct TFBart embeddings weights name when load_weight_prefix is passed (#18993) --- .../models/bart/modeling_tf_bart.py | 26 ++++++++++++++++--- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/src/transformers/models/bart/modeling_tf_bart.py b/src/transformers/models/bart/modeling_tf_bart.py index 45aef4768b176c..35e8665984d786 100644 --- a/src/transformers/models/bart/modeling_tf_bart.py +++ b/src/transformers/models/bart/modeling_tf_bart.py @@ -16,6 +16,7 @@ import random +from contextlib import nullcontext from typing import Optional, Tuple, Union import numpy as np @@ -748,7 +749,15 @@ def call( raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: - with tf.name_scope(self.embed_tokens.name + "/"): + # if `self.embed_tokens.load_weight_prefix` is set, runs the embedding operation with the correct name + # scope, so that its weights are registered with the desired name for loading/storing. When `tf.name_scope` + # is used with a name ending in `/`, that name replaces the current name scope. + # (embeddings with tf.name_scope: self.embed_tokens.load_weight_prefix/self.embed_tokens.name/embeddings:0) + if hasattr(self.embed_tokens, "load_weight_prefix"): + context_manager = tf.name_scope(self.embed_tokens.load_weight_prefix + "/") + else: + context_manager = nullcontext() + with context_manager: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) @@ -936,7 +945,15 @@ def call( positions = self.embed_positions(input_shape, position_ids=position_ids) if inputs_embeds is None: - with tf.name_scope(self.embed_tokens.name + "/"): + # if `self.embed_tokens.load_weight_prefix` is set, runs the embedding operation with the correct name + # scope, so that its weights are registered with the desired name for loading/storing. When `tf.name_scope` + # is used with a name ending in `/`, that name replaces the current name scope. + # (embeddings with tf.name_scope: self.embed_tokens.load_weight_prefix/self.embed_tokens.name/embeddings:0) + if hasattr(self.embed_tokens, "load_weight_prefix"): + context_manager = tf.name_scope(self.embed_tokens.load_weight_prefix + "/") + else: + context_manager = nullcontext() + with context_manager: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale hidden_states = inputs_embeds @@ -1032,8 +1049,9 @@ class TFBartMainLayer(tf.keras.layers.Layer): def __init__(self, config: BartConfig, load_weight_prefix=None, **kwargs): super().__init__(**kwargs) self.config = config - load_weight_prefix = "model.shared" if load_weight_prefix is None else load_weight_prefix - self.shared = tf.keras.layers.Embedding(config.vocab_size, config.d_model, name=load_weight_prefix) + self.shared = tf.keras.layers.Embedding(config.vocab_size, config.d_model, name="model.shared") + # Additional attribute to specify the expected name scope of the layer (for loading/storing weights) + self.shared.load_weight_prefix = "model.shared" if load_weight_prefix is None else load_weight_prefix self.encoder = TFBartEncoder(config, self.shared, name="encoder") self.decoder = TFBartDecoder(config, self.shared, name="decoder") From 39b5bb79d914f2af723f3aaf92f96fce8b957559 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 12 Sep 2022 19:39:01 +0200 Subject: [PATCH 265/539] fix checkpoint name for wav2vec2 conformer (#18994) * fix checkpoint name for wav2vec2 conformer Co-authored-by: ydshieh --- .../configuration_wav2vec2_conformer.py | 10 +++++----- .../wav2vec2_conformer/modeling_wav2vec2_conformer.py | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py b/src/transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py index 9c5e4d205b9af7..11181f5601a197 100644 --- a/src/transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py +++ b/src/transformers/models/wav2vec2_conformer/configuration_wav2vec2_conformer.py @@ -24,8 +24,8 @@ logger = logging.get_logger(__name__) WAV2VEC2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = { - "facebook/wav2vec2-conformer-large-rel-pos": ( - "https://huggingface.co/facebook/wav2vec2-conformer-large-rel-pos/resolve/main/config.json" + "facebook/wav2vec2-conformer-rel-pos-large": ( + "https://huggingface.co/facebook/wav2vec2-conformer-rel-pos-large/resolve/main/config.json" ), } @@ -35,7 +35,7 @@ class Wav2Vec2ConformerConfig(PretrainedConfig): This is the configuration class to store the configuration of a [`Wav2Vec2ConformerModel`]. It is used to instantiate an Wav2Vec2Conformer model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Wav2Vec2Conformer - [facebook/wav2vec2-conformer-large-rel-pos](https://huggingface.co/facebook/wav2vec2-conformer-large-rel-pos) + [facebook/wav2vec2-conformer-rel-pos-large](https://huggingface.co/facebook/wav2vec2-conformer-rel-pos-large) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the @@ -195,10 +195,10 @@ class Wav2Vec2ConformerConfig(PretrainedConfig): ```python >>> from transformers import Wav2Vec2ConformerModel, Wav2Vec2ConformerConfig - >>> # Initializing a Wav2Vec2Conformer facebook/wav2vec2-conformer-large-rel-pos style configuration + >>> # Initializing a Wav2Vec2Conformer facebook/wav2vec2-conformer-rel-pos-large style configuration >>> configuration = Wav2Vec2ConformerConfig() - >>> # Initializing a model from the facebook/wav2vec2-conformer-large-rel-pos style configuration + >>> # Initializing a model from the facebook/wav2vec2-conformer-rel-pos-large style configuration >>> model = Wav2Vec2ConformerModel(configuration) >>> # Accessing the model configuration diff --git a/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py b/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py index 4c4962b155c35c..5bee0d040c8ba4 100644 --- a/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py +++ b/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py @@ -80,7 +80,7 @@ WAV2VEC2_CONFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "facebook/wav2vec2-conformer-large-rel-pos", + "facebook/wav2vec2-conformer-rel-pos-large", # See all Wav2Vec2Conformer models at https://huggingface.co/models?filter=wav2vec2-conformer ] @@ -1226,7 +1226,7 @@ def _set_gradient_checkpointing(self, module, value=False): `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask == True`. For all models whose processor has `config.return_attention_mask == False`, such as - [wav2vec2_conformer-base](https://huggingface.co/facebook/wav2vec2-conformer-large-rel-pos), + [wav2vec2-conformer-rel-pos-large](https://huggingface.co/facebook/wav2vec2-conformer-rel-pos-large), `attention_mask` should **not** be passed to avoid degraded performance when doing batched inference. For such models `input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware that these models also yield slightly different results depending on whether `input_values` is padded or From 7f4708e1a26c0431dd2b025e4b6eb5b80507683d Mon Sep 17 00:00:00 2001 From: Partho Date: Mon, 12 Sep 2022 23:41:40 +0530 Subject: [PATCH 266/539] added type hints (#18996) --- .../modeling_bigbird_pegasus.py | 38 +++++++++---------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py b/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py index 3cdfe7d2ffe097..40ed916b0c71e7 100755 --- a/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py +++ b/src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py @@ -1806,13 +1806,13 @@ def __init__(self, config: BigBirdPegasusConfig, embed_tokens: Optional[nn.Embed def forward( self, - input_ids=None, - attention_mask=None, - head_mask=None, - inputs_embeds=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, ): r""" Args: @@ -2127,18 +2127,18 @@ def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_em def forward( self, - input_ids=None, - attention_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - head_mask=None, - cross_attn_head_mask=None, - past_key_values=None, - inputs_embeds=None, - use_cache=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, ): r""" Args: From 1182b945a6ea0f8cf53b1058d383fb9bc5ff02e8 Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Mon, 12 Sep 2022 19:30:27 +0100 Subject: [PATCH 267/539] TF: TF 2.10 unpin + related onnx test skips (#18995) --- docker/transformers-all-latest-gpu/Dockerfile | 2 +- docker/transformers-cpu/Dockerfile | 4 ++-- docker/transformers-gpu/Dockerfile | 2 +- docker/transformers-tensorflow-cpu/Dockerfile | 4 ++-- docker/transformers-tensorflow-gpu/Dockerfile | 2 +- setup.py | 4 ++-- src/transformers/dependency_versions_table.py | 4 ++-- tests/models/bart/test_modeling_tf_bart.py | 5 +++++ tests/models/bert/test_modeling_tf_bert.py | 5 +++++ tests/models/gpt2/test_modeling_tf_gpt2.py | 5 +++++ tests/models/layoutlm/test_modeling_tf_layoutlm.py | 5 +++++ tests/models/wav2vec2/test_modeling_tf_wav2vec2.py | 2 ++ 12 files changed, 33 insertions(+), 11 deletions(-) diff --git a/docker/transformers-all-latest-gpu/Dockerfile b/docker/transformers-all-latest-gpu/Dockerfile index 502c9a61fd6c4a..4db6f51826f02b 100644 --- a/docker/transformers-all-latest-gpu/Dockerfile +++ b/docker/transformers-all-latest-gpu/Dockerfile @@ -32,7 +32,7 @@ RUN echo torch=$VERSION # TODO: We might need to specify proper versions that work with a specific torch version (especially for past CI). RUN [ "$PYTORCH" != "pre" ] && python3 -m pip install --no-cache-dir -U $VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/$CUDA || python3 -m pip install --no-cache-dir -U --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/$CUDA -RUN python3 -m pip install --no-cache-dir -U tensorflow==2.9.1 +RUN python3 -m pip install --no-cache-dir -U tensorflow RUN python3 -m pip uninstall -y flax jax # Use installed torch version for `torch-scatter` to avid to deal with PYTORCH='pre'. diff --git a/docker/transformers-cpu/Dockerfile b/docker/transformers-cpu/Dockerfile index 75a4f20a3b18fd..c3590e4239e470 100644 --- a/docker/transformers-cpu/Dockerfile +++ b/docker/transformers-cpu/Dockerfile @@ -15,7 +15,7 @@ RUN apt update && \ RUN python3 -m pip install --no-cache-dir --upgrade pip && \ python3 -m pip install --no-cache-dir \ jupyter \ - tensorflow-cpu==2.9.1 \ + tensorflow-cpu \ torch WORKDIR /workspace @@ -23,4 +23,4 @@ COPY . transformers/ RUN cd transformers/ && \ python3 -m pip install --no-cache-dir . -CMD ["/bin/bash"] \ No newline at end of file +CMD ["/bin/bash"] diff --git a/docker/transformers-gpu/Dockerfile b/docker/transformers-gpu/Dockerfile index fc5c818438ba25..0212eaa2a72b26 100644 --- a/docker/transformers-gpu/Dockerfile +++ b/docker/transformers-gpu/Dockerfile @@ -15,7 +15,7 @@ RUN apt update && \ RUN python3 -m pip install --no-cache-dir --upgrade pip && \ python3 -m pip install --no-cache-dir \ jupyter \ - tensorflow==2.9.1 \ + tensorflow \ torch RUN git clone https://github.com/NVIDIA/apex diff --git a/docker/transformers-tensorflow-cpu/Dockerfile b/docker/transformers-tensorflow-cpu/Dockerfile index dbc81acbbb25a4..ef3dc3d212cbbc 100644 --- a/docker/transformers-tensorflow-cpu/Dockerfile +++ b/docker/transformers-tensorflow-cpu/Dockerfile @@ -15,11 +15,11 @@ RUN apt update && \ RUN python3 -m pip install --no-cache-dir --upgrade pip && \ python3 -m pip install --no-cache-dir \ mkl \ - tensorflow-cpu==2.9.1 + tensorflow-cpu WORKDIR /workspace COPY . transformers/ RUN cd transformers/ && \ python3 -m pip install --no-cache-dir . -CMD ["/bin/bash"] \ No newline at end of file +CMD ["/bin/bash"] diff --git a/docker/transformers-tensorflow-gpu/Dockerfile b/docker/transformers-tensorflow-gpu/Dockerfile index a24faad8f2fcfb..a05ace7d08e268 100644 --- a/docker/transformers-tensorflow-gpu/Dockerfile +++ b/docker/transformers-tensorflow-gpu/Dockerfile @@ -12,7 +12,7 @@ RUN git clone https://github.com/huggingface/transformers && cd transformers && RUN python3 -m pip install --no-cache-dir -e ./transformers[dev-tensorflow,testing] # If set to nothing, will install the latest version -ARG TENSORFLOW='2.9.1' +ARG TENSORFLOW='' RUN [ ${#TENSORFLOW} -gt 0 ] && VERSION='tensorflow=='$TENSORFLOW'.*' || VERSION='tensorflow'; python3 -m pip install --no-cache-dir -U $VERSION RUN python3 -m pip uninstall -y torch flax diff --git a/setup.py b/setup.py index 3aa0a86a4f607e..3145272ef6f061 100644 --- a/setup.py +++ b/setup.py @@ -154,8 +154,8 @@ "sigopt", "librosa", "starlette", - "tensorflow-cpu>=2.3,<2.10", - "tensorflow>=2.3,<2.10", + "tensorflow-cpu>=2.3", + "tensorflow>=2.3", "tensorflow-text", "tf2onnx", "timeout-decorator", diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py index 58e4a2cd42c372..434b87048405f6 100644 --- a/src/transformers/dependency_versions_table.py +++ b/src/transformers/dependency_versions_table.py @@ -60,8 +60,8 @@ "sigopt": "sigopt", "librosa": "librosa", "starlette": "starlette", - "tensorflow-cpu": "tensorflow-cpu>=2.3,<2.10", - "tensorflow": "tensorflow>=2.3,<2.10", + "tensorflow-cpu": "tensorflow-cpu>=2.3", + "tensorflow": "tensorflow>=2.3", "tensorflow-text": "tensorflow-text", "tf2onnx": "tf2onnx", "timeout-decorator": "timeout-decorator", diff --git a/tests/models/bart/test_modeling_tf_bart.py b/tests/models/bart/test_modeling_tf_bart.py index 69cf530ee6b322..f47824fc08cc65 100644 --- a/tests/models/bart/test_modeling_tf_bart.py +++ b/tests/models/bart/test_modeling_tf_bart.py @@ -223,6 +223,11 @@ def test_model_common_attributes(self): def test_saved_model_creation(self): pass + # TODO (Joao): fix me + @unittest.skip("Onnx compliancy broke with TF 2.10") + def test_onnx_compliancy(self): + pass + def _long_tensor(tok_lst): return tf.constant(tok_lst, dtype=tf.int32) diff --git a/tests/models/bert/test_modeling_tf_bert.py b/tests/models/bert/test_modeling_tf_bert.py index e83ae9f71802d0..451f54325d13e0 100644 --- a/tests/models/bert/test_modeling_tf_bert.py +++ b/tests/models/bert/test_modeling_tf_bert.py @@ -740,6 +740,11 @@ def test_custom_load_tf_weights(self): for layer in output_loading_info["missing_keys"]: self.assertTrue(layer.split("_")[0] in ["dropout", "classifier"]) + # TODO (Joao): fix me + @unittest.skip("Onnx compliancy broke with TF 2.10") + def test_onnx_compliancy(self): + pass + @require_tf class TFBertModelIntegrationTest(unittest.TestCase): diff --git a/tests/models/gpt2/test_modeling_tf_gpt2.py b/tests/models/gpt2/test_modeling_tf_gpt2.py index b4752a155c34bf..d97a2b3ed93630 100644 --- a/tests/models/gpt2/test_modeling_tf_gpt2.py +++ b/tests/models/gpt2/test_modeling_tf_gpt2.py @@ -451,6 +451,11 @@ def test_onnx_runtime_optimize(self): onnxruntime.InferenceSession(onnx_model_proto.SerializeToString()) + # TODO (Joao): fix me + @unittest.skip("Onnx compliancy broke with TF 2.10") + def test_onnx_compliancy(self): + pass + @require_tf class TFGPT2ModelLanguageGenerationTest(unittest.TestCase): diff --git a/tests/models/layoutlm/test_modeling_tf_layoutlm.py b/tests/models/layoutlm/test_modeling_tf_layoutlm.py index 4224f20a1da76d..7bcf6e590b9857 100644 --- a/tests/models/layoutlm/test_modeling_tf_layoutlm.py +++ b/tests/models/layoutlm/test_modeling_tf_layoutlm.py @@ -256,6 +256,11 @@ def test_model_from_pretrained(self): model = TFLayoutLMModel.from_pretrained(model_name) self.assertIsNotNone(model) + # TODO (Joao): fix me + @unittest.skip("Onnx compliancy broke with TF 2.10") + def test_onnx_compliancy(self): + pass + def prepare_layoutlm_batch_inputs(): # Here we prepare a batch of 2 sequences to test a LayoutLM forward pass on: diff --git a/tests/models/wav2vec2/test_modeling_tf_wav2vec2.py b/tests/models/wav2vec2/test_modeling_tf_wav2vec2.py index 3418a5a76b07fc..665bf2d4f22cf3 100644 --- a/tests/models/wav2vec2/test_modeling_tf_wav2vec2.py +++ b/tests/models/wav2vec2/test_modeling_tf_wav2vec2.py @@ -435,6 +435,8 @@ def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) + # TODO (Joao): fix me + @unittest.skip("Broke with TF 2.10") def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) From 4c2e983f44ce4d3b9c8502d42cc568e45897bd15 Mon Sep 17 00:00:00 2001 From: Tobias Nusser Date: Mon, 12 Sep 2022 21:03:48 +0200 Subject: [PATCH 268/539] Fixed typo (#18921) Fixed typo itmes --> items --- docs/source/en/preprocessing.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/en/preprocessing.mdx b/docs/source/en/preprocessing.mdx index e67741633acb7a..d710d41f660286 100644 --- a/docs/source/en/preprocessing.mdx +++ b/docs/source/en/preprocessing.mdx @@ -54,7 +54,7 @@ Then pass your sentence to the tokenizer: 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` -The tokenizer returns a dictionary with three important itmes: +The tokenizer returns a dictionary with three important items: * [input_ids](glossary#input-ids) are the indices corresponding to each token in the sentence. * [attention_mask](glossary#attention-mask) indicates whether a token should be attended to or not. From 470799b3a67c6e078b9cb3a38dc0395a70e1a4a2 Mon Sep 17 00:00:00 2001 From: Chris Emezue <36100251+chrisemezue@users.noreply.github.com> Date: Mon, 12 Sep 2022 15:59:19 -0400 Subject: [PATCH 269/539] Removed issue in wav2vec link (#18945) Fix connected to [this issue](https://github.com/huggingface/transformers/issues/18944) --- docs/source/en/tasks/audio_classification.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/en/tasks/audio_classification.mdx b/docs/source/en/tasks/audio_classification.mdx index 33a469ac5a79cf..99cc3064653fb3 100644 --- a/docs/source/en/tasks/audio_classification.mdx +++ b/docs/source/en/tasks/audio_classification.mdx @@ -117,7 +117,7 @@ The [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset has a sam The preprocessing function needs to: 1. Call the `audio` column to load and if necessary resample the audio file. -2. Check the sampling rate of the audio file matches the sampling rate of the audio data a model was pretrained with. You can find this information on the Wav2Vec2 [model card]((https://huggingface.co/facebook/wav2vec2-base)). +2. Check the sampling rate of the audio file matches the sampling rate of the audio data a model was pretrained with. You can find this information on the Wav2Vec2 [model card](https://huggingface.co/facebook/wav2vec2-base). 3. Set a maximum input length so longer inputs are batched without being truncated. ```py @@ -189,4 +189,4 @@ At this point, only three steps remain: For a more in-depth example of how to fine-tune a model for audio classification, take a look at the corresponding [PyTorch notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/audio_classification.ipynb). - \ No newline at end of file +
From 69df33f18076f4e509e09f8697c2f4bb0c063a85 Mon Sep 17 00:00:00 2001 From: Alara Dirik <8944735+alaradirik@users.noreply.github.com> Date: Tue, 13 Sep 2022 09:36:03 +0300 Subject: [PATCH 270/539] Fix MaskFormerFeatureExtractor instance segmentation preprocessing bug (#18997) * fix preprocessing for instance segmentation maps * add support for per-image instance2class_id mapping * edit docstrings for clarity --- .../feature_extraction_maskformer.py | 103 ++++++++++-------- 1 file changed, 60 insertions(+), 43 deletions(-) diff --git a/src/transformers/models/maskformer/feature_extraction_maskformer.py b/src/transformers/models/maskformer/feature_extraction_maskformer.py index 3a5fd49d80fa77..8514bb26da2ac2 100644 --- a/src/transformers/models/maskformer/feature_extraction_maskformer.py +++ b/src/transformers/models/maskformer/feature_extraction_maskformer.py @@ -69,11 +69,12 @@ class MaskFormerFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionM The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the ImageNet std. ignore_index (`int`, *optional*): - Value of the index (label) to be removed from the segmentation maps. + Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels + denoted with 0 (background) will be replaced with `ignore_index`. reduce_labels (`bool`, *optional*, defaults to `False`): - Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is - used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The - background label will be replaced by `ignore_index`. + Whether or not to decrement all label values of segmentation maps by 1. Usually used for datasets where 0 + is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). + The background label will be replaced by `ignore_index`. """ @@ -162,7 +163,7 @@ def __call__( images: ImageInput, segmentation_maps: ImageInput = None, pad_and_return_pixel_mask: Optional[bool] = True, - instance_id_to_semantic_id: Optional[Dict[int, int]] = None, + instance_id_to_semantic_id: Optional[Union[List[Dict[int, int]], Dict[int, int]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchFeature: @@ -191,7 +192,9 @@ def __call__( number of channels, H and W are image height and width. segmentation_maps (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`, *optional*): - Optionally, the corresponding semantic segmentation maps with the pixel-wise annotations. + The corresponding semantic segmentation maps with the pixel-wise class id annotations or instance + segmentation maps with pixel-wise instance id annotations. Assumed to be semantic segmentation maps if + no `instance_id_to_semantic_id map` is provided. pad_and_return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether or not to pad images up to the largest image in a batch and create a pixel mask. @@ -201,10 +204,11 @@ def __call__( - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). - instance_id_to_semantic_id (`Dict[int, int]`, *optional*): - If passed, we treat `segmentation_maps` as an instance segmentation map where each pixel represents an - instance id. To convert it to a binary mask of shape (`batch, num_labels, height, width`) we need a - dictionary mapping instance ids to label ids to create a semantic segmentation map. + instance_id_to_semantic_id (`List[Dict[int, int]]` or `Dict[int, int]`, *optional*): + A mapping between object instance ids and class ids. If passed, `segmentation_maps` is treated as an + instance segmentation map where each pixel represents an instance id. Can be provided as a single + dictionary with a global / dataset-level mapping or as a list of dictionaries (one per image), to map + instance ids in each image separately. return_tensors (`str` or [`~file_utils.TensorType`], *optional*): If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` @@ -215,11 +219,11 @@ def __call__( - **pixel_values** -- Pixel values to be fed to a model. - **pixel_mask** -- Pixel mask to be fed to a model (when `pad_and_return_pixel_mask=True` or if - *"pixel_mask"* is in `self.model_input_names`). - - **mask_labels** -- Optional list of mask labels of shape `(labels, height, width)` to be fed to a model - (when `annotations` are provided). - - **class_labels** -- Optional list of class labels of shape `(labels)` to be fed to a model (when - `annotations` are provided). They identify the labels of `mask_labels`, e.g. the label of + `pixel_mask` is in `self.model_input_names`). + - **mask_labels** -- Optional list of mask labels of shape `(num_class_labels, height, width)` to be fed to + a model (when `annotations` are provided). + - **class_labels** -- Optional list of class labels of shape `(num_class_labels)` to be fed to a model + (when `annotations` are provided). They identify the labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` if `class_labels[i][j]`. """ # Input type checking for clearer error @@ -319,26 +323,31 @@ def convert_segmentation_map_to_binary_masks( segmentation_map: "np.ndarray", instance_id_to_semantic_id: Optional[Dict[int, int]] = None, ): + # Get unique ids (class or instance ids based on input) + all_labels = np.unique(segmentation_map) + + # Drop background label if applicable if self.reduce_labels: - if self.ignore_index is None: - raise ValueError("`ignore_index` must be set when `reduce_labels` is `True`.") - segmentation_map[segmentation_map == 0] = self.ignore_index - # instances ids start from 1! - segmentation_map -= 1 - segmentation_map[segmentation_map == self.ignore_index - 1] = self.ignore_index + all_labels = all_labels[all_labels != 0] + + # Generate a binary mask for each object instance + binary_masks = [np.ma.masked_where(segmentation_map == i, segmentation_map) for i in all_labels] + binary_masks = np.stack(binary_masks, axis=0) # (num_labels, height, width) + # Convert instance ids to class ids if instance_id_to_semantic_id is not None: - # segmentation_map will be treated as an instance segmentation map where each pixel is a instance id - # thus it has to be converted to a semantic segmentation map - for instance_id, label_id in instance_id_to_semantic_id.items(): - segmentation_map[segmentation_map == instance_id] = label_id - # get all the labels in the image - labels = np.unique(segmentation_map) - # remove ignore index (if we have one) - if self.ignore_index is not None: - labels = labels[labels != self.ignore_index] - # helping broadcast by making mask [1,W,H] and labels [C, 1, 1] - binary_masks = segmentation_map[None] == labels[:, None, None] + labels = np.zeros(all_labels.shape[0]) + + for label in all_labels: + class_id = instance_id_to_semantic_id[label] + labels[all_labels == label] = class_id + else: + labels = all_labels + + # Decrement labels by 1 + if self.reduce_labels: + labels -= 1 + return binary_masks.astype(np.float32), labels.astype(np.int64) def encode_inputs( @@ -346,7 +355,7 @@ def encode_inputs( pixel_values_list: List["np.ndarray"], segmentation_maps: ImageInput = None, pad_and_return_pixel_mask: bool = True, - instance_id_to_semantic_id: Optional[Dict[int, int]] = None, + instance_id_to_semantic_id: Optional[Union[List[Dict[int, int]], Dict[int, int]]] = None, return_tensors: Optional[Union[str, TensorType]] = None, ): """ @@ -374,10 +383,11 @@ def encode_inputs( - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). - instance_id_to_semantic_id (`Dict[int, int]`, *optional*): - If passed, we treat `segmentation_maps` as an instance segmentation map where each pixel represents an - instance id. To convert it to a binary mask of shape (`batch, num_labels, height, width`) we need a - dictionary mapping instance ids to label ids to create a semantic segmentation map. + instance_id_to_semantic_id (`List[Dict[int, int]]` or `Dict[int, int]`, *optional*): + A mapping between object instance ids and class ids. If passed, `segmentation_maps` is treated as an + instance segmentation map where each pixel represents an instance id. Can be provided as a single + dictionary with a global/dataset-level mapping or as a list of dictionaries (one per image), to map + instance ids in each image separately. return_tensors (`str` or [`~file_utils.TensorType`], *optional*): If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` @@ -388,7 +398,7 @@ def encode_inputs( - **pixel_values** -- Pixel values to be fed to a model. - **pixel_mask** -- Pixel mask to be fed to a model (when `pad_and_return_pixel_mask=True` or if - *"pixel_mask"* is in `self.model_input_names`). + `pixel_mask` is in `self.model_input_names`). - **mask_labels** -- Optional list of mask labels of shape `(labels, height, width)` to be fed to a model (when `annotations` are provided). - **class_labels** -- Optional list of class labels of shape `(labels)` to be fed to a model (when @@ -402,10 +412,17 @@ def encode_inputs( if segmentation_maps is not None: segmentation_maps = map(np.array, segmentation_maps) converted_segmentation_maps = [] - for segmentation_map in segmentation_maps: - converted_segmentation_map = self.convert_segmentation_map_to_binary_masks( - segmentation_map, instance_id_to_semantic_id - ) + + for i, segmentation_map in enumerate(segmentation_maps): + # Use instance2class_id mapping per image + if isinstance(instance_id_to_semantic_id, List): + converted_segmentation_map = self.convert_segmentation_map_to_binary_masks( + segmentation_map, instance_id_to_semantic_id[i] + ) + else: + converted_segmentation_map = self.convert_segmentation_map_to_binary_masks( + segmentation_map, instance_id_to_semantic_id + ) converted_segmentation_maps.append(converted_segmentation_map) annotations = [] @@ -469,7 +486,7 @@ def post_process_segmentation( Returns: `torch.Tensor`: - A tensor of shape (`batch_size, num_labels, height, width`). + A tensor of shape (`batch_size, num_class_labels, height, width`). """ # class_queries_logits has shape [BATCH, QUERIES, CLASSES + 1] class_queries_logits = outputs.class_queries_logits From 4bd36f1853501c453dc0ae994f789311468b87bc Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Tue, 13 Sep 2022 09:19:43 +0100 Subject: [PATCH 271/539] Generate: add model class validation (#18902) --- src/transformers/generation_flax_utils.py | 32 +++++++++++++- src/transformers/generation_tf_utils.py | 40 ++++++++++++++---- src/transformers/generation_utils.py | 42 +++++++++++++++---- .../models/openai/modeling_openai.py | 5 ++- .../models/openai/modeling_tf_openai.py | 3 ++ 5 files changed, 106 insertions(+), 16 deletions(-) diff --git a/src/transformers/generation_flax_utils.py b/src/transformers/generation_flax_utils.py index 353df6fdbba636..51d00efc106855 100644 --- a/src/transformers/generation_flax_utils.py +++ b/src/transformers/generation_flax_utils.py @@ -36,6 +36,11 @@ FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) +from .models.auto import ( + FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, + FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, + FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING, +) from .utils import ModelOutput, logging @@ -161,6 +166,30 @@ def _adapt_logits_for_beam_search(self, logits): """ return logits + def _validate_model_class(self): + """ + Confirms that the model class is compatible with generation. If not, raises an exception that points to the + right class to use. + """ + if not hasattr(self, "prepare_inputs_for_generation"): + generate_compatible_mappings = [ + FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, + FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING, + FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, + ] + generate_compatible_classes = set() + for model_mapping in generate_compatible_mappings: + supported_models = model_mapping.get(type(self.config), default=None) + if supported_models is not None: + generate_compatible_classes.add(supported_models.__name__) + exception_message = ( + f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as " + "it doesn't have a language model head." + ) + if generate_compatible_classes: + exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}" + raise TypeError(exception_message) + def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]): """Validates model kwargs for generation. Generate argument typos will also be caught here.""" unused_model_args = [] @@ -281,7 +310,8 @@ def generate( >>> outputs = model.generate(input_ids=input_ids, max_length=20, top_k=30, do_sample=True) >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ```""" - # Validate model kwargs + # Validate the `.generate()` call + self._validate_model_class() self._validate_model_kwargs(model_kwargs.copy()) # set init values diff --git a/src/transformers/generation_tf_utils.py b/src/transformers/generation_tf_utils.py index 86149db0c16ef6..5652b0e180de85 100644 --- a/src/transformers/generation_tf_utils.py +++ b/src/transformers/generation_tf_utils.py @@ -35,6 +35,12 @@ TFTopKLogitsWarper, TFTopPLogitsWarper, ) +from .models.auto import ( + TF_MODEL_FOR_CAUSAL_LM_MAPPING, + TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, + TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, + TF_MODEL_FOR_VISION_2_SEQ_MAPPING, +) from .tf_utils import shape_list, stable_softmax from .utils import ModelOutput, logging @@ -357,12 +363,6 @@ def seed_generator(self): supports_xla_generation = True - def prepare_inputs_for_generation(self, inputs, **kwargs): - """ - Implement in subclasses of [`TFPreTrainedModel`] for custom behavior to prepare inputs in the generate method. - """ - return {"input_ids": inputs} - def _use_cache(self, outputs, use_cache): """During generation, decide whether to pass the `past` variable to the next forward pass.""" use_cache = getattr(self.config, "use_cache", False) @@ -1290,6 +1290,31 @@ def adjust_logits_during_generation( else: return logits + def _validate_model_class(self): + """ + Confirms that the model class is compatible with generation. If not, raises an exception that points to the + right class to use. + """ + if not hasattr(self, "prepare_inputs_for_generation"): + generate_compatible_mappings = [ + TF_MODEL_FOR_CAUSAL_LM_MAPPING, + TF_MODEL_FOR_VISION_2_SEQ_MAPPING, + TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, + TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, + ] + generate_compatible_classes = set() + for model_mapping in generate_compatible_mappings: + supported_models = model_mapping.get(type(self.config), default=None) + if supported_models is not None: + generate_compatible_classes.add(supported_models.__name__) + exception_message = ( + f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as " + "it doesn't have a language model head." + ) + if generate_compatible_classes: + exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}" + raise TypeError(exception_message) + def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]): """Validates model kwargs for generation. Generate argument typos will also be caught here.""" # Excludes arguments that are handled before calling any model function @@ -1508,7 +1533,8 @@ def _generate( # generate sequences without allowing bad_words to be generated outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) ```""" - # 0. Validate model kwargs + # 0. Validate the `.generate()` call + self._validate_model_class() self._validate_model_kwargs(model_kwargs.copy()) # 1. Set generation parameters if not already defined diff --git a/src/transformers/generation_utils.py b/src/transformers/generation_utils.py index 17de17f79e1ce9..7544473d7838ec 100644 --- a/src/transformers/generation_utils.py +++ b/src/transformers/generation_utils.py @@ -51,6 +51,13 @@ StoppingCriteriaList, validate_stopping_criteria, ) +from .models.auto import ( + MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, + MODEL_FOR_CAUSAL_LM_MAPPING, + MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, + MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, + MODEL_FOR_VISION_2_SEQ_MAPPING, +) from .pytorch_utils import torch_int_div from .utils import ModelOutput, logging @@ -463,12 +470,6 @@ def _can_retrieve_inputs_from_name( return can_retrieve_inputs - def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, **kwargs) -> Dict[str, Any]: - """ - Implement in subclasses of [`PreTrainedModel`] for custom behavior to prepare inputs in the generate method. - """ - return {"input_ids": input_ids} - def adjust_logits_during_generation(self, logits: torch.FloatTensor, **kwargs) -> torch.FloatTensor: """ Implement in subclasses of [`PreTrainedModel`] for custom behavior to adjust the logits in the generate method. @@ -840,6 +841,32 @@ def compute_transition_beam_scores( return transition_scores + def _validate_model_class(self): + """ + Confirms that the model class is compatible with generation. If not, raises an exception that points to the + right class to use. + """ + if not hasattr(self, "prepare_inputs_for_generation"): + generate_compatible_mappings = [ + MODEL_FOR_CAUSAL_LM_MAPPING, + MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, + MODEL_FOR_VISION_2_SEQ_MAPPING, + MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, + MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, + ] + generate_compatible_classes = set() + for model_mapping in generate_compatible_mappings: + supported_models = model_mapping.get(type(self.config), default=None) + if supported_models is not None: + generate_compatible_classes.add(supported_models.__name__) + exception_message = ( + f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as " + "it doesn't have a language model head." + ) + if generate_compatible_classes: + exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}" + raise TypeError(exception_message) + def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]): """Validates model kwargs for generation. Generate argument typos will also be caught here.""" # Excludes arguments that are handled before calling any model function @@ -1142,7 +1169,8 @@ def generate( >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) ['Paris ist eines der dichtesten besiedelten Gebiete Europas.'] ```""" - # 0. Validate model kwargs + # 0. Validate the `.generate()` call + self._validate_model_class() self._validate_model_kwargs(model_kwargs.copy()) # 1. Set generation parameters if not already defined diff --git a/src/transformers/models/openai/modeling_openai.py b/src/transformers/models/openai/modeling_openai.py index e5e5da5da0c9f6..2bd634abeb1154 100644 --- a/src/transformers/models/openai/modeling_openai.py +++ b/src/transformers/models/openai/modeling_openai.py @@ -20,7 +20,7 @@ import math import os from dataclasses import dataclass -from typing import Optional, Tuple, Union +from typing import Any, Dict, Optional, Tuple, Union import torch from torch import nn @@ -607,6 +607,9 @@ def forward( attentions=transformer_outputs.attentions, ) + def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, **kwargs) -> Dict[str, Any]: + return {"input_ids": input_ids} + @add_start_docstrings( """ diff --git a/src/transformers/models/openai/modeling_tf_openai.py b/src/transformers/models/openai/modeling_tf_openai.py index b6056233d5a8a8..e1bb1a5f6f6c1e 100644 --- a/src/transformers/models/openai/modeling_tf_openai.py +++ b/src/transformers/models/openai/modeling_tf_openai.py @@ -638,6 +638,9 @@ def serving_output(self, output: TFCausalLMOutput) -> TFCausalLMOutput: return TFCausalLMOutput(logits=output.logits, hidden_states=hs, attentions=attns) + def prepare_inputs_for_generation(self, inputs, **kwargs): + return {"input_ids": inputs} + @add_start_docstrings( """ From 2848c9ce42eba9717fc4cf60ebbbeb0f18ee5c9a Mon Sep 17 00:00:00 2001 From: Partho Date: Tue, 13 Sep 2022 17:28:46 +0530 Subject: [PATCH 272/539] Add type hints for M2M (#18998) * added type hints * fixed typo --- .../models/m2m_100/modeling_m2m_100.py | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/src/transformers/models/m2m_100/modeling_m2m_100.py b/src/transformers/models/m2m_100/modeling_m2m_100.py index 3abe593bb129a7..092496f9f2d128 100755 --- a/src/transformers/models/m2m_100/modeling_m2m_100.py +++ b/src/transformers/models/m2m_100/modeling_m2m_100.py @@ -17,7 +17,7 @@ import math import random -from typing import Optional, Tuple, Union +from typing import List, Optional, Tuple, Union import torch from torch import nn @@ -712,13 +712,13 @@ def __init__(self, config: M2M100Config, embed_tokens: Optional[nn.Embedding] = def forward( self, - input_ids=None, - attention_mask=None, - head_mask=None, - inputs_embeds=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, ): r""" Args: @@ -887,18 +887,18 @@ def __init__(self, config: M2M100Config, embed_tokens: Optional[nn.Embedding] = def forward( self, - input_ids=None, - attention_mask=None, - encoder_hidden_states=None, - encoder_attention_mask=None, - head_mask=None, - cross_attn_head_mask=None, - past_key_values=None, - inputs_embeds=None, - use_cache=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, ): r""" Args: From 2886f7f08ac2cd9c0255e3b66d4ee95884b96087 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Tue, 13 Sep 2022 14:04:14 +0200 Subject: [PATCH 273/539] Fix tokenizer for XLMRobertaXL (#19004) Co-authored-by: ydshieh --- src/transformers/models/auto/tokenization_auto.py | 8 +++++++- .../models/xlm_roberta_xl/modeling_xlm_roberta_xl.py | 2 +- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/src/transformers/models/auto/tokenization_auto.py b/src/transformers/models/auto/tokenization_auto.py index 86cc4eba94f3da..7aa7627cab2cb5 100644 --- a/src/transformers/models/auto/tokenization_auto.py +++ b/src/transformers/models/auto/tokenization_auto.py @@ -271,7 +271,13 @@ "XLMRobertaTokenizerFast" if is_tokenizers_available() else None, ), ), - ("xlm-roberta-xl", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)), + ( + "xlm-roberta-xl", + ( + "XLMRobertaTokenizer" if is_sentencepiece_available() else None, + "XLMRobertaTokenizerFast" if is_tokenizers_available() else None, + ), + ), ( "xlnet", ( diff --git a/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py b/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py index ca1c35bf650586..e0634d9a6ae668 100644 --- a/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py +++ b/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py @@ -54,7 +54,7 @@ _CHECKPOINT_FOR_DOC = "xlm-roberta-xlarge" _CONFIG_FOR_DOC = "XLMRobertaXLConfig" -_TOKENIZER_FOR_DOC = "RobertaTokenizer" +_TOKENIZER_FOR_DOC = "XLMRobertaTokenizer" XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/xlm-roberta-xl", From 420f6c5ee3fb15a683bdbaf771f751edb85f1c19 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Tue, 13 Sep 2022 07:04:03 -0700 Subject: [PATCH 274/539] Update default revision for document-question-answering (#18938) Co-authored-by: Ankur Goyal --- src/transformers/models/auto/modeling_auto.py | 2 +- src/transformers/models/auto/modeling_tf_auto.py | 2 +- src/transformers/pipelines/__init__.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index 8efde09b96ea64..9423692c34a510 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -920,7 +920,7 @@ class AutoModelForDocumentQuestionAnswering(_BaseAutoModelClass): AutoModelForDocumentQuestionAnswering = auto_class_update( AutoModelForDocumentQuestionAnswering, head_doc="document question answering", - checkpoint_for_example='impira/layoutlm-document-qa", revision="3dc6de3', + checkpoint_for_example='impira/layoutlm-document-qa", revision="52e01b3', ) diff --git a/src/transformers/models/auto/modeling_tf_auto.py b/src/transformers/models/auto/modeling_tf_auto.py index ba1e74e14caf63..77ab03d38aeb1c 100644 --- a/src/transformers/models/auto/modeling_tf_auto.py +++ b/src/transformers/models/auto/modeling_tf_auto.py @@ -532,7 +532,7 @@ class TFAutoModelForDocumentQuestionAnswering(_BaseAutoModelClass): TFAutoModelForDocumentQuestionAnswering = auto_class_update( TFAutoModelForDocumentQuestionAnswering, head_doc="document question answering", - checkpoint_for_example='impira/layoutlm-document-qa", revision="3dc6de3', + checkpoint_for_example='impira/layoutlm-document-qa", revision="52e01b3', ) diff --git a/src/transformers/pipelines/__init__.py b/src/transformers/pipelines/__init__.py index e3f9e603b5111d..fa2b4fb244b7e4 100755 --- a/src/transformers/pipelines/__init__.py +++ b/src/transformers/pipelines/__init__.py @@ -222,7 +222,7 @@ "pt": (AutoModelForDocumentQuestionAnswering,) if is_torch_available() else (), "tf": (), "default": { - "model": {"pt": ("impira/layoutlm-document-qa", "3a93017")}, + "model": {"pt": ("impira/layoutlm-document-qa", "52e01b3")}, }, "type": "multimodal", }, From 00fc9217d14a19dad09fc6761c49fdd136272590 Mon Sep 17 00:00:00 2001 From: Rahul A R Date: Tue, 13 Sep 2022 11:29:48 -0400 Subject: [PATCH 275/539] Fixed bug which caused overwrite_cache to always be True (#19000) * fixed bug which caused overwrite_cache to always be True (#18967). * reformatting changes --- examples/pytorch/language-modeling/run_clm_no_trainer.py | 2 +- examples/pytorch/language-modeling/run_mlm_no_trainer.py | 2 +- .../question-answering/run_qa_beam_search_no_trainer.py | 2 +- examples/pytorch/question-answering/run_qa_no_trainer.py | 2 +- .../pytorch/summarization/run_summarization_no_trainer.py | 2 +- examples/pytorch/translation/run_translation_no_trainer.py | 2 +- .../quantization-qdqbert/evaluate-hf-trt-qa.py | 4 +--- 7 files changed, 7 insertions(+), 9 deletions(-) diff --git a/examples/pytorch/language-modeling/run_clm_no_trainer.py b/examples/pytorch/language-modeling/run_clm_no_trainer.py index c0fcbbd3ce5857..731aff7acbccab 100755 --- a/examples/pytorch/language-modeling/run_clm_no_trainer.py +++ b/examples/pytorch/language-modeling/run_clm_no_trainer.py @@ -183,7 +183,7 @@ def parse_args(): help="The number of processes to use for the preprocessing.", ) parser.add_argument( - "--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets" + "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--no_keep_linebreaks", action="store_true", help="Do not keep line breaks when using TXT files." diff --git a/examples/pytorch/language-modeling/run_mlm_no_trainer.py b/examples/pytorch/language-modeling/run_mlm_no_trainer.py index 2a1951f83de217..c336a6acc5c19a 100755 --- a/examples/pytorch/language-modeling/run_mlm_no_trainer.py +++ b/examples/pytorch/language-modeling/run_mlm_no_trainer.py @@ -190,7 +190,7 @@ def parse_args(): help="The number of processes to use for the preprocessing.", ) parser.add_argument( - "--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets" + "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--mlm_probability", type=float, default=0.15, help="Ratio of tokens to mask for masked language modeling loss" diff --git a/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py b/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py index c3fdcdae9a8f12..5ab5a3d1756961 100644 --- a/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py +++ b/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py @@ -232,7 +232,7 @@ def parse_args(): ), ) parser.add_argument( - "--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets" + "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--max_predict_samples", diff --git a/examples/pytorch/question-answering/run_qa_no_trainer.py b/examples/pytorch/question-answering/run_qa_no_trainer.py index 926e24c4dd7b93..f10191fbb5ba13 100755 --- a/examples/pytorch/question-answering/run_qa_no_trainer.py +++ b/examples/pytorch/question-answering/run_qa_no_trainer.py @@ -253,7 +253,7 @@ def parse_args(): ), ) parser.add_argument( - "--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets" + "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--max_predict_samples", diff --git a/examples/pytorch/summarization/run_summarization_no_trainer.py b/examples/pytorch/summarization/run_summarization_no_trainer.py index 89365b4de4e98c..ec9f5fb6190aa7 100644 --- a/examples/pytorch/summarization/run_summarization_no_trainer.py +++ b/examples/pytorch/summarization/run_summarization_no_trainer.py @@ -138,7 +138,7 @@ def parse_args(): help="The number of processes to use for the preprocessing.", ) parser.add_argument( - "--overwrite_cache", type=bool, default=None, help="Overwrite the cached training and evaluation sets" + "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--max_target_length", diff --git a/examples/pytorch/translation/run_translation_no_trainer.py b/examples/pytorch/translation/run_translation_no_trainer.py index 92bc97e355f884..4c7ac38e601a59 100644 --- a/examples/pytorch/translation/run_translation_no_trainer.py +++ b/examples/pytorch/translation/run_translation_no_trainer.py @@ -168,7 +168,7 @@ def parse_args(): help="The number of processes to use for the preprocessing.", ) parser.add_argument( - "--overwrite_cache", type=bool, default=None, help="Overwrite the cached training and evaluation sets" + "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--max_length", diff --git a/examples/research_projects/quantization-qdqbert/evaluate-hf-trt-qa.py b/examples/research_projects/quantization-qdqbert/evaluate-hf-trt-qa.py index 2a089963039592..bd0b1157b01d47 100755 --- a/examples/research_projects/quantization-qdqbert/evaluate-hf-trt-qa.py +++ b/examples/research_projects/quantization-qdqbert/evaluate-hf-trt-qa.py @@ -135,9 +135,7 @@ parser.add_argument( "--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data." ) -parser.add_argument( - "--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets" -) +parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets") parser.add_argument( "--fp16", action="store_true", From d14af22c5c93335f0d521410c16d161087164313 Mon Sep 17 00:00:00 2001 From: "Wang, Yi" Date: Tue, 13 Sep 2022 23:56:20 +0800 Subject: [PATCH 276/539] add DDP HPO support for optuna (#19002) only main_process will have HPO, and pass argument to other process Signed-off-by: Wang, Yi A Signed-off-by: Wang, Yi A --- src/transformers/integrations.py | 61 ++++++++++++++++++++++---------- src/transformers/trainer.py | 2 +- 2 files changed, 43 insertions(+), 20 deletions(-) diff --git a/src/transformers/integrations.py b/src/transformers/integrations.py index 73ff50453b6f43..b40dc8f42d9d5e 100644 --- a/src/transformers/integrations.py +++ b/src/transformers/integrations.py @@ -159,26 +159,49 @@ def default_hp_search_backend(): def run_hp_search_optuna(trainer, n_trials: int, direction: str, **kwargs) -> BestRun: import optuna - def _objective(trial, checkpoint_dir=None): - checkpoint = None - if checkpoint_dir: - for subdir in os.listdir(checkpoint_dir): - if subdir.startswith(PREFIX_CHECKPOINT_DIR): - checkpoint = os.path.join(checkpoint_dir, subdir) - trainer.objective = None - trainer.train(resume_from_checkpoint=checkpoint, trial=trial) - # If there hasn't been any evaluation during the training loop. - if getattr(trainer, "objective", None) is None: - metrics = trainer.evaluate() - trainer.objective = trainer.compute_objective(metrics) - return trainer.objective + if trainer.args.process_index == 0: - timeout = kwargs.pop("timeout", None) - n_jobs = kwargs.pop("n_jobs", 1) - study = optuna.create_study(direction=direction, **kwargs) - study.optimize(_objective, n_trials=n_trials, timeout=timeout, n_jobs=n_jobs) - best_trial = study.best_trial - return BestRun(str(best_trial.number), best_trial.value, best_trial.params) + def _objective(trial, checkpoint_dir=None): + checkpoint = None + if checkpoint_dir: + for subdir in os.listdir(checkpoint_dir): + if subdir.startswith(PREFIX_CHECKPOINT_DIR): + checkpoint = os.path.join(checkpoint_dir, subdir) + trainer.objective = None + trainer._hp_search_setup(trial) + if trainer.args.world_size > 1: + if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: + raise RuntimeError("only support DDP optuna HPO for ParallelMode.DISTRIBUTED currently.") + torch.distributed.broadcast_object_list(pickle.dumps(trainer.args), src=0) + trainer.train(resume_from_checkpoint=checkpoint) + # If there hasn't been any evaluation during the training loop. + if getattr(trainer, "objective", None) is None: + metrics = trainer.evaluate() + trainer.objective = trainer.compute_objective(metrics) + return trainer.objective + + timeout = kwargs.pop("timeout", None) + n_jobs = kwargs.pop("n_jobs", 1) + study = optuna.create_study(direction=direction, **kwargs) + study.optimize(_objective, n_trials=n_trials, timeout=timeout, n_jobs=n_jobs) + best_trial = study.best_trial + return BestRun(str(best_trial.number), best_trial.value, best_trial.params) + else: + for i in range(n_trials): + trainer.objective = None + args_main_rank = list(pickle.dumps(trainer.args)) + if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: + raise RuntimeError("only support DDP optuna HPO for ParallelMode.DISTRIBUTED currently.") + torch.distributed.broadcast_object_list(args_main_rank, src=0) + local_rank = trainer.args.local_rank # backup the local_rank info + trainer.args = pickle.loads(bytes(args_main_rank)) + trainer.args.local_rank = local_rank + trainer.train(resume_from_checkpoint=None) + # If there hasn't been any evaluation during the training loop. + if getattr(trainer, "objective", None) is None: + metrics = trainer.evaluate() + trainer.objective = trainer.compute_objective(metrics) + return None def run_hp_search_ray(trainer, n_trials: int, direction: str, **kwargs) -> BestRun: diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index f7c3836d4afd42..c1cc3f92acb4d5 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -1210,7 +1210,7 @@ def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]): value = type(old_attr)(value) setattr(self.args, key, value) if self.hp_search_backend == HPSearchBackend.OPTUNA: - logger.info("Trial:", trial.params) + logger.info(f"Trial: {trial.params}") if self.hp_search_backend == HPSearchBackend.SIGOPT: logger.info(f"SigOpt Assignments: {trial.assignments}") if self.hp_search_backend == HPSearchBackend.WANDB: From ad5045e3e39442f86a6b08cd87c04338134dd55a Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Tue, 13 Sep 2022 18:10:11 +0200 Subject: [PATCH 277/539] add missing `require_tf` for `TFOPTGenerationTest` (#19010) Co-authored-by: ydshieh --- tests/models/opt/test_modeling_tf_opt.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/models/opt/test_modeling_tf_opt.py b/tests/models/opt/test_modeling_tf_opt.py index 61d6aad53fc169..4e9972e4aaa195 100644 --- a/tests/models/opt/test_modeling_tf_opt.py +++ b/tests/models/opt/test_modeling_tf_opt.py @@ -315,6 +315,7 @@ def test_logits(self): self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4)) +@require_tf @slow class TFOPTGenerationTest(unittest.TestCase): @property From f89f16a51ebd08c4b9728b87d63e1054df79d641 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Tue, 13 Sep 2022 13:11:24 -0400 Subject: [PATCH 278/539] Re-add support for single url files in objects download (#19014) --- src/transformers/configuration_utils.py | 7 +++- src/transformers/feature_extraction_utils.py | 5 +++ src/transformers/modeling_flax_utils.py | 5 +++ src/transformers/modeling_tf_utils.py | 5 +++ src/transformers/modeling_utils.py | 5 +++ src/transformers/tokenization_utils_base.py | 5 +++ src/transformers/utils/__init__.py | 2 ++ src/transformers/utils/hub.py | 35 +++++++++++++++++++- 8 files changed, 67 insertions(+), 2 deletions(-) diff --git a/src/transformers/configuration_utils.py b/src/transformers/configuration_utils.py index 41503255ac2adb..db8147b4dee349 100755 --- a/src/transformers/configuration_utils.py +++ b/src/transformers/configuration_utils.py @@ -32,7 +32,9 @@ PushToHubMixin, cached_file, copy_func, + download_url, extract_commit_hash, + is_remote_url, is_torch_available, logging, ) @@ -592,9 +594,12 @@ def _get_config_dict( is_local = os.path.isdir(pretrained_model_name_or_path) if os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)): - # Soecial case when pretrained_model_name_or_path is a local file + # Special case when pretrained_model_name_or_path is a local file resolved_config_file = pretrained_model_name_or_path is_local = True + elif is_remote_url(pretrained_model_name_or_path): + configuration_file = pretrained_model_name_or_path + resolved_config_file = download_url(pretrained_model_name_or_path) else: configuration_file = kwargs.pop("_configuration_file", CONFIG_NAME) diff --git a/src/transformers/feature_extraction_utils.py b/src/transformers/feature_extraction_utils.py index 394d67a8c5a1a7..85c751b8410730 100644 --- a/src/transformers/feature_extraction_utils.py +++ b/src/transformers/feature_extraction_utils.py @@ -31,8 +31,10 @@ TensorType, cached_file, copy_func, + download_url, is_flax_available, is_offline_mode, + is_remote_url, is_tf_available, is_torch_available, logging, @@ -386,6 +388,9 @@ def get_feature_extractor_dict( if os.path.isfile(pretrained_model_name_or_path): resolved_feature_extractor_file = pretrained_model_name_or_path is_local = True + elif is_remote_url(pretrained_model_name_or_path): + feature_extractor_file = pretrained_model_name_or_path + resolved_feature_extractor_file = download_url(pretrained_model_name_or_path) else: feature_extractor_file = FEATURE_EXTRACTOR_NAME try: diff --git a/src/transformers/modeling_flax_utils.py b/src/transformers/modeling_flax_utils.py index b19f3db77e1900..92d307e8cd7e2d 100644 --- a/src/transformers/modeling_flax_utils.py +++ b/src/transformers/modeling_flax_utils.py @@ -47,8 +47,10 @@ add_start_docstrings_to_model_forward, cached_file, copy_func, + download_url, has_file, is_offline_mode, + is_remote_url, logging, replace_return_docstrings, ) @@ -677,6 +679,9 @@ def from_pretrained( elif os.path.isfile(pretrained_model_name_or_path): archive_file = pretrained_model_name_or_path is_local = True + elif is_remote_url(pretrained_model_name_or_path): + archive_file = pretrained_model_name_or_path + resolved_archive_file = download_url(pretrained_model_name_or_path) else: filename = WEIGHTS_NAME if from_pt else FLAX_WEIGHTS_NAME try: diff --git a/src/transformers/modeling_tf_utils.py b/src/transformers/modeling_tf_utils.py index 2c1febd43c8d20..160a68c9209dd7 100644 --- a/src/transformers/modeling_tf_utils.py +++ b/src/transformers/modeling_tf_utils.py @@ -54,9 +54,11 @@ ModelOutput, PushToHubMixin, cached_file, + download_url, find_labels, has_file, is_offline_mode, + is_remote_url, logging, requires_backends, working_or_temp_dir, @@ -2345,6 +2347,9 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): elif os.path.isfile(pretrained_model_name_or_path + ".index"): archive_file = pretrained_model_name_or_path + ".index" is_local = True + elif is_remote_url(pretrained_model_name_or_path): + archive_file = pretrained_model_name_or_path + resolved_archive_file = download_url(pretrained_model_name_or_path) else: # set correct filename filename = WEIGHTS_NAME if from_pt else TF2_WEIGHTS_NAME diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index 68fe7f94d2c7f4..2f305ff8dd098f 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -59,10 +59,12 @@ PushToHubMixin, cached_file, copy_func, + download_url, has_file, is_accelerate_available, is_bitsandbytes_available, is_offline_mode, + is_remote_url, logging, replace_return_docstrings, ) @@ -1998,6 +2000,9 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P ) archive_file = os.path.join(subfolder, pretrained_model_name_or_path + ".index") is_local = True + elif is_remote_url(pretrained_model_name_or_path): + archive_file = pretrained_model_name_or_path + resolved_archive_file = download_url(pretrained_model_name_or_path) else: # set correct filename if from_tf: diff --git a/src/transformers/tokenization_utils_base.py b/src/transformers/tokenization_utils_base.py index 0b01163c0b15e6..5062a7bfb99991 100644 --- a/src/transformers/tokenization_utils_base.py +++ b/src/transformers/tokenization_utils_base.py @@ -42,9 +42,11 @@ add_end_docstrings, cached_file, copy_func, + download_url, extract_commit_hash, is_flax_available, is_offline_mode, + is_remote_url, is_tf_available, is_tokenizers_available, is_torch_available, @@ -1680,6 +1682,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], FutureWarning, ) file_id = list(cls.vocab_files_names.keys())[0] + vocab_files[file_id] = pretrained_model_name_or_path else: # At this point pretrained_model_name_or_path is either a directory or a model identifier name @@ -1723,6 +1726,8 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], for file_id, file_path in vocab_files.items(): if file_path is None: resolved_vocab_files[file_id] = None + elif is_remote_url(file_path): + resolved_vocab_files[file_id] = download_url(file_path, proxies=proxies) else: resolved_vocab_files[file_id] = cached_file( pretrained_model_name_or_path, diff --git a/src/transformers/utils/__init__.py b/src/transformers/utils/__init__.py index 2a2a4c41257492..44c3e1807860f6 100644 --- a/src/transformers/utils/__init__.py +++ b/src/transformers/utils/__init__.py @@ -63,6 +63,7 @@ cached_file, default_cache_path, define_sagemaker_information, + download_url, extract_commit_hash, get_cached_models, get_file_from_repo, @@ -70,6 +71,7 @@ has_file, http_user_agent, is_offline_mode, + is_remote_url, move_cache, send_example_telemetry, ) diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py index 3e5863e4eff54c..8bdf360b029cda 100644 --- a/src/transformers/utils/hub.py +++ b/src/transformers/utils/hub.py @@ -19,10 +19,12 @@ import re import shutil import sys +import tempfile import traceback import warnings from pathlib import Path from typing import Dict, List, Optional, Tuple, Union +from urllib.parse import urlparse from uuid import uuid4 import huggingface_hub @@ -37,7 +39,7 @@ whoami, ) from huggingface_hub.constants import HUGGINGFACE_HEADER_X_LINKED_ETAG, HUGGINGFACE_HEADER_X_REPO_COMMIT -from huggingface_hub.file_download import REGEX_COMMIT_HASH +from huggingface_hub.file_download import REGEX_COMMIT_HASH, http_get from huggingface_hub.utils import ( EntryNotFoundError, LocalEntryNotFoundError, @@ -124,6 +126,11 @@ def is_offline_mode(): _CACHED_NO_EXIST = object() +def is_remote_url(url_or_filename): + parsed = urlparse(url_or_filename) + return parsed.scheme in ("http", "https") + + def get_cached_models(cache_dir: Union[str, Path] = None) -> List[Tuple]: """ Returns a list of tuples representing model binaries that are cached locally. Each tuple has shape `(model_url, @@ -541,6 +548,32 @@ def get_file_from_repo( ) +def download_url(url, proxies=None): + """ + Downloads a given url in a temporary file. This function is not safe to use in multiple processes. Its only use is + for deprecated behavior allowing to download config/models with a single url instead of using the Hub. + + Args: + url (`str`): The url of the file to download. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. + + Returns: + `str`: The location of the temporary file where the url was downloaded. + """ + warnings.warn( + f"Using `from_pretrained` with the url of a file (here {url}) is deprecated and won't be possible anymore in" + " v5 of Transformers. You should host your file on the Hub (hf.co) instead and use the repository ID. Note" + " that this is not compatible with the caching system (your file will be downloaded at each execution) or" + " multiple processes (each process will download the file in a different temporary file)." + ) + tmp_file = tempfile.mktemp() + with open(tmp_file, "wb") as f: + http_get(url, f, proxies=proxies) + return tmp_file + + def has_file( path_or_repo: Union[str, os.PathLike], filename: str, From 4157e3cd7e2bb5a7be6dc065a3e20c49cc1300ab Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Tue, 13 Sep 2022 18:16:36 +0100 Subject: [PATCH 279/539] new length penalty docstring (#19006) --- src/transformers/configuration_utils.py | 5 ++++- src/transformers/generation_beam_search.py | 14 +++++++------ src/transformers/generation_tf_utils.py | 21 +++++++++++-------- src/transformers/generation_utils.py | 7 ++++--- .../models/fsmt/configuration_fsmt.py | 5 ++++- src/transformers/models/rag/modeling_rag.py | 8 +++---- .../models/rag/modeling_tf_rag.py | 8 +++---- 7 files changed, 40 insertions(+), 28 deletions(-) diff --git a/src/transformers/configuration_utils.py b/src/transformers/configuration_utils.py index db8147b4dee349..3fdc0f265f6331 100755 --- a/src/transformers/configuration_utils.py +++ b/src/transformers/configuration_utils.py @@ -148,7 +148,10 @@ class PretrainedConfig(PushToHubMixin): Parameter for repetition penalty that will be used by default in the `generate` method of the model. 1.0 means no penalty. length_penalty (`float`, *optional*, defaults to 1): - Exponential penalty to the length that will be used by default in the `generate` method of the model. + Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to + the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log + likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while + `length_penalty` < 0.0 encourages shorter sequences. no_repeat_ngram_size (`int`, *optional*, defaults to 0) -- Value that will be used by default in the `generate` method of the model for `no_repeat_ngram_size`. If set to int > 0, all ngrams of that size can only occur once. diff --git a/src/transformers/generation_beam_search.py b/src/transformers/generation_beam_search.py index 7c50c0d7acdccc..902160b228fdc2 100644 --- a/src/transformers/generation_beam_search.py +++ b/src/transformers/generation_beam_search.py @@ -138,9 +138,10 @@ class BeamSearchScorer(BeamScorer): Defines the device type (*e.g.*, `"cpu"` or `"cuda"`) on which this instance of `BeamSearchScorer` will be allocated. length_penalty (`float`, *optional*, defaults to 1.0): - Exponential penalty to the length. 1.0 means no penalty. Set to values < 1.0 in order to encourage the - model to generate shorter sequences, to a value > 1.0 in order to encourage the model to produce longer - sequences. + Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to + the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log + likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while + `length_penalty` < 0.0 encourages shorter sequences. do_early_stopping (`bool`, *optional*, defaults to `False`): Whether to stop the beam search when at least `num_beams` sentences are finished per batch or not. num_beam_hyps_to_keep (`int`, *optional*, defaults to 1): @@ -405,9 +406,10 @@ class ConstrainedBeamSearchScorer(BeamScorer): Defines the device type (*e.g.*, `"cpu"` or `"cuda"`) on which this instance of `BeamSearchScorer` will be allocated. length_penalty (`float`, *optional*, defaults to 1.0): - Exponential penalty to the length. 1.0 means no penalty. Set to values < 1.0 in order to encourage the - model to generate shorter sequences, to a value > 1.0 in order to encourage the model to produce longer - sequences. + Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to + the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log + likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while + `length_penalty` < 0.0 encourages shorter sequences. do_early_stopping (`bool`, *optional*, defaults to `False`): Whether to stop the beam search when at least `num_beams` sentences are finished per batch or not. num_beam_hyps_to_keep (`int`, *optional*, defaults to 1): diff --git a/src/transformers/generation_tf_utils.py b/src/transformers/generation_tf_utils.py index 5652b0e180de85..30319dcce389ed 100644 --- a/src/transformers/generation_tf_utils.py +++ b/src/transformers/generation_tf_utils.py @@ -455,10 +455,10 @@ def generate( eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. length_penalty (`float`, *optional*, defaults to 1.0): - Exponential penalty to the length. 1.0 means no penalty. - - Set to values < 1.0 in order to encourage the model to generate shorter sequences, to a value > 1.0 in - order to encourage the model to produce longer sequences. + Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent + to the sequence length, which in turn is used to divide the score of the sequence. Since the score is + the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, + while `length_penalty` < 0.0 encourages shorter sequences. no_repeat_ngram_size (`int`, *optional*, defaults to 0): If set to int > 0, all ngrams of that size can only occur once. bad_words_ids(`List[int]`, *optional*): @@ -1419,10 +1419,10 @@ def _generate( eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. length_penalty (`float`, *optional*, defaults to 1.0): - Exponential penalty to the length. 1.0 means no penalty. - - Set to values < 1.0 in order to encourage the model to generate shorter sequences, to a value > 1.0 in - order to encourage the model to produce longer sequences. + Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent + to the sequence length, which in turn is used to divide the score of the sequence. Since the score is + the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, + while `length_penalty` < 0.0 encourages shorter sequences. no_repeat_ngram_size (`int`, *optional*, defaults to 0): If set to int > 0, all ngrams of that size can only occur once. bad_words_ids(`List[int]`, *optional*): @@ -2657,7 +2657,10 @@ def beam_search( eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. length_penalty (`float`, *optional*, defaults to 1.0): - Exponential penalty to the length. 1.0 means no penalty. + Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent + to the sequence length, which in turn is used to divide the score of the sequence. Since the score is + the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, + while `length_penalty` < 0.0 encourages shorter sequences. early_stopping (`bool`, *optional*, defaults to `False`): Whether to stop the beam search when at least `num_beams` sentences are finished per batch or not. logits_processor (`[TFLogitsProcessorList]`, *optional*): diff --git a/src/transformers/generation_utils.py b/src/transformers/generation_utils.py index 7544473d7838ec..84f1a6f0392a38 100644 --- a/src/transformers/generation_utils.py +++ b/src/transformers/generation_utils.py @@ -1005,9 +1005,10 @@ def generate( eos_token_id (`int`, *optional*, defaults to `model.config.eos_token_id`): The id of the *end-of-sequence* token. length_penalty (`float`, *optional*, defaults to `model.config.length_penalty` or 1.0 if the config does not set any value): - Exponential penalty to the length. 1.0 means that the beam score is penalized by the sequence length. - 0.0 means no penalty. Set to values < 0.0 in order to encourage the model to generate longer - sequences, to a value > 0.0 in order to encourage the model to produce shorter sequences. + Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent + to the sequence length, which in turn is used to divide the score of the sequence. Since the score is + the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, + while `length_penalty` < 0.0 encourages shorter sequences. no_repeat_ngram_size (`int`, *optional*, defaults to `model.config.no_repeat_ngram_size` or 0 if the config does not set any value): If set to int > 0, all ngrams of that size can only occur once. encoder_no_repeat_ngram_size (`int`, *optional*, defaults to `model.config.encoder_no_repeat_ngram_size` or 0 if the config does not set any value): diff --git a/src/transformers/models/fsmt/configuration_fsmt.py b/src/transformers/models/fsmt/configuration_fsmt.py index 14298d6a1cc029..de96c768a20c03 100644 --- a/src/transformers/models/fsmt/configuration_fsmt.py +++ b/src/transformers/models/fsmt/configuration_fsmt.py @@ -107,7 +107,10 @@ class FSMTConfig(PretrainedConfig): Number of beams for beam search that will be used by default in the `generate` method of the model. 1 means no beam search. length_penalty (`float`, *optional*, defaults to 1) - Exponential penalty to the length that will be used by default in the `generate` method of the model. + Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to + the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log + likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while + `length_penalty` < 0.0 encourages shorter sequences. early_stopping (`bool`, *optional*, defaults to `False`) Flag that will be used by default in the `generate` method of the model. Whether to stop the beam search when at least `num_beams` sentences are finished per batch or not. diff --git a/src/transformers/models/rag/modeling_rag.py b/src/transformers/models/rag/modeling_rag.py index 41af393c671032..45b606905362f8 100644 --- a/src/transformers/models/rag/modeling_rag.py +++ b/src/transformers/models/rag/modeling_rag.py @@ -1463,10 +1463,10 @@ def generate( eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. length_penalty (`float`, *optional*, defaults to 1.0): - Exponential penalty to the length. 1.0 means no penalty. - - Set to values < 1.0 in order to encourage the model to generate shorter sequences, to a value > 1.0 in - order to encourage the model to produce longer sequences. + Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent + to the sequence length, which in turn is used to divide the score of the sequence. Since the score is + the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, + while `length_penalty` < 0.0 encourages shorter sequences. no_repeat_ngram_size (`int`, *optional*, defaults to 0): If set to int > 0, all ngrams of that size can only occur once. encoder_no_repeat_ngram_size (`int`, *optional*, defaults to 0): diff --git a/src/transformers/models/rag/modeling_tf_rag.py b/src/transformers/models/rag/modeling_tf_rag.py index 26482026baa8f8..a31b2d45217e8d 100644 --- a/src/transformers/models/rag/modeling_tf_rag.py +++ b/src/transformers/models/rag/modeling_tf_rag.py @@ -1054,10 +1054,10 @@ def generate( eos_token_id (`int`, *optional*): The id of the *end-of-sequence* token. length_penalty (`float`, *optional*, defaults to 1.0): - Exponential penalty to the length. 1.0 means no penalty. - - Set to values < 1.0 in order to encourage the model to generate shorter sequences, to a value > 1.0 in - order to encourage the model to produce longer sequences. + Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent + to the sequence length, which in turn is used to divide the score of the sequence. Since the score is + the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, + while `length_penalty` < 0.0 encourages shorter sequences. no_repeat_ngram_size (`int`, *optional*, defaults to 0): If set to int > 0, all ngrams of that size can only occur once. bad_words_ids(`List[int]`, *optional*): From 5a70a77bfa07e8761b195f4239297bf22fa40ad8 Mon Sep 17 00:00:00 2001 From: Ahmed Elnaggar Date: Wed, 14 Sep 2022 10:12:51 +0200 Subject: [PATCH 280/539] Add Support to Gradient Checkpointing for LongT5 (#18977) FlaxLongT5PreTrainedModel is missing "enable_gradient_checkpointing" function. This gives an error if someone tries to enable gradient checkpointing for longt5. This pull request fixes it. --- src/transformers/models/longt5/modeling_flax_longt5.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/transformers/models/longt5/modeling_flax_longt5.py b/src/transformers/models/longt5/modeling_flax_longt5.py index 224515cd12a200..e2cce0a3f4c623 100644 --- a/src/transformers/models/longt5/modeling_flax_longt5.py +++ b/src/transformers/models/longt5/modeling_flax_longt5.py @@ -1686,6 +1686,13 @@ def __init__( module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) + def enable_gradient_checkpointing(self): + self._module = self.module_class( + config=self.config, + dtype=self.dtype, + gradient_checkpointing=True, + ) + def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") From 59407bbeb31fff8340938768051c9daabd38d7a7 Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Wed, 14 Sep 2022 11:45:21 +0200 Subject: [PATCH 281/539] Add Deformable DETR (#17281) * First draft * More improvements * Improve model, add custom CUDA code * Import torch before * Add script that imports custom layer * Add everything in new ops directory * Import custom layer in modeling file * Fix ARCHIVE_MAP typo * Creating the custom kernel on the fly. * Import custom layer in modeling file * More improvements * Fix CUDA loading * More improvements * Improve conversion script * Improve conversion script * Make it work until encoder_outputs * Make forward pass work * More improvements * Make logits match original implementation * Make implementation also support single_scale model * Add support for single_scale and dilation checkpoint * Add support for with_box_refine model * Support also two stage model * Improve tests * Fix more tests * Make more tests pass * Upload all models to the hub * Clean up some code * Improve decoder outputs * Rename intermediate hidden states and reference points * Improve model outputs * Move tests to dedicated folder * Improve model outputs * Fix retain_grad test * Improve docs * Clean up and make test_initialization pass * Improve variable names * Add copied from statements * Improve docs * Fix style * Improve docs * Improve docs, move tests to model folder * Fix rebase * Remove DetrForSegmentation from auto mapping * Apply suggestions from code review * Improve variable names and docstrings * Apply some more suggestions from code review * Apply suggestion from code review * better docs and variables names * hint to num_queries and two_stage confusion * remove asserts and code refactor * add exception if two_stage is True and with_box_refine is False * use f-strings * Improve docs and variable names * Fix code quality * Fix rebase * Add require_torch_gpu decorator * Add pip install ninja to CI jobs * Apply suggestion of @sgugger * Remove DeformableDetrForObjectDetection from auto mapping * Remove DeformableDetrModel from auto mapping * Add model to toctree * Add model back to mappings, skip model in pipeline tests * Apply @sgugger's suggestion * Fix imports in the init * Fix copies * Add CPU implementation * Comment out GPU function * Undo previous change * Apply more suggestions * Remove require_torch_gpu annotator * Fix quality * Add logger.info * Fix logger * Fix variable names * Fix initializaztion * Add missing initialization * Update checkpoint name * Add model to doc tests * Add CPU/GPU equivalence test * Add Deformable DETR to pipeline tests * Skip model for object detection pipeline Co-authored-by: Nicolas Patry Co-authored-by: Nouamane Tazi Co-authored-by: Sylvain Gugger --- README.md | 1 + README_ko.md | 1 + README_zh-hans.md | 1 + README_zh-hant.md | 1 + docs/source/en/_toctree.yml | 2 + docs/source/en/index.mdx | 2 + docs/source/en/model_doc/deformable_detr.mdx | 50 + setup.py | 1 + src/transformers/__init__.py | 24 +- src/transformers/models/__init__.py | 1 + .../models/auto/configuration_auto.py | 3 + .../models/auto/feature_extraction_auto.py | 1 + src/transformers/models/auto/modeling_auto.py | 2 + .../models/deformable_detr/__init__.py | 61 + .../configuration_deformable_detr.py | 227 ++ .../convert_deformable_detr_to_pytorch.py | 237 ++ .../custom_kernel/cpu/ms_deform_attn_cpu.cpp | 40 + .../custom_kernel/cpu/ms_deform_attn_cpu.h | 32 + .../custom_kernel/cuda/ms_deform_attn_cuda.cu | 156 ++ .../cuda/ms_deform_attn_cuda.cuh | 1467 ++++++++++ .../custom_kernel/cuda/ms_deform_attn_cuda.h | 29 + .../cuda/ms_deform_im2col_cuda.cuh | 1327 +++++++++ .../custom_kernel/ms_deform_attn.h | 61 + .../deformable_detr/custom_kernel/vision.cpp | 16 + .../models/deformable_detr/load_custom.py | 51 + .../modeling_deformable_detr.py | 2465 +++++++++++++++++ src/transformers/models/detr/modeling_detr.py | 143 +- .../models/maskformer/modeling_maskformer.py | 61 +- .../models/yolos/modeling_yolos.py | 51 +- .../utils/dummy_timm_and_vision_objects.py | 24 + src/transformers/utils/dummy_timm_objects.py | 32 - tests/models/deformable_detr/__init__.py | 0 .../test_modeling_deformable_detr.py | 625 +++++ .../test_pipelines_object_detection.py | 6 + utils/check_repo.py | 2 + utils/documentation_tests.txt | 1 + 36 files changed, 7048 insertions(+), 156 deletions(-) create mode 100644 docs/source/en/model_doc/deformable_detr.mdx create mode 100644 src/transformers/models/deformable_detr/__init__.py create mode 100644 src/transformers/models/deformable_detr/configuration_deformable_detr.py create mode 100644 src/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py create mode 100644 src/transformers/models/deformable_detr/custom_kernel/cpu/ms_deform_attn_cpu.cpp create mode 100644 src/transformers/models/deformable_detr/custom_kernel/cpu/ms_deform_attn_cpu.h create mode 100644 src/transformers/models/deformable_detr/custom_kernel/cuda/ms_deform_attn_cuda.cu create mode 100644 src/transformers/models/deformable_detr/custom_kernel/cuda/ms_deform_attn_cuda.cuh create mode 100644 src/transformers/models/deformable_detr/custom_kernel/cuda/ms_deform_attn_cuda.h create mode 100644 src/transformers/models/deformable_detr/custom_kernel/cuda/ms_deform_im2col_cuda.cuh create mode 100644 src/transformers/models/deformable_detr/custom_kernel/ms_deform_attn.h create mode 100644 src/transformers/models/deformable_detr/custom_kernel/vision.cpp create mode 100644 src/transformers/models/deformable_detr/load_custom.py create mode 100755 src/transformers/models/deformable_detr/modeling_deformable_detr.py delete mode 100644 src/transformers/utils/dummy_timm_objects.py create mode 100644 tests/models/deformable_detr/__init__.py create mode 100644 tests/models/deformable_detr/test_modeling_deformable_detr.py diff --git a/README.md b/README.md index 6edaf4f012fc71..570f12ac44e06a 100644 --- a/README.md +++ b/README.md @@ -285,6 +285,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[DeBERTa](https://huggingface.co/docs/transformers/model_doc/deberta)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. 1. **[DeBERTa-v2](https://huggingface.co/docs/transformers/model_doc/deberta-v2)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. 1. **[Decision Transformer](https://huggingface.co/docs/transformers/model_doc/decision_transformer)** (from Berkeley/Facebook/Google) released with the paper [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) by Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch. +1. **[Deformable DETR](https://huggingface.co/docs/transformers/main/model_doc/deformable_detr)** (from SenseTime Research) released with the paper [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) by Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, Jifeng Dai. 1. **[DeiT](https://huggingface.co/docs/transformers/model_doc/deit)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou. 1. **[DETR](https://huggingface.co/docs/transformers/model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko. 1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan. diff --git a/README_ko.md b/README_ko.md index 18c300975833c9..c6016624a861c6 100644 --- a/README_ko.md +++ b/README_ko.md @@ -237,6 +237,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[DeBERTa](https://huggingface.co/docs/transformers/model_doc/deberta)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. 1. **[DeBERTa-v2](https://huggingface.co/docs/transformers/model_doc/deberta-v2)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. 1. **[Decision Transformer](https://huggingface.co/docs/transformers/model_doc/decision_transformer)** (from Berkeley/Facebook/Google) released with the paper [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) by Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch. +1. **[Deformable DETR](https://huggingface.co/docs/transformers/main/model_doc/deformable_detr)** (from SenseTime Research) released with the paper [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) by Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, Jifeng Dai. 1. **[DeiT](https://huggingface.co/docs/transformers/model_doc/deit)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou. 1. **[DETR](https://huggingface.co/docs/transformers/model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko. 1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan. diff --git a/README_zh-hans.md b/README_zh-hans.md index 77710265651f59..f3c07bfb361d11 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -261,6 +261,7 @@ conda install -c huggingface transformers 1. **[DeBERTa](https://huggingface.co/docs/transformers/model_doc/deberta)** (来自 Microsoft) 伴随论文 [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) 由 Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen 发布。 1. **[DeBERTa-v2](https://huggingface.co/docs/transformers/model_doc/deberta-v2)** (来自 Microsoft) 伴随论文 [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) 由 Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen 发布。 1. **[Decision Transformer](https://huggingface.co/docs/transformers/model_doc/decision_transformer)** (来自 Berkeley/Facebook/Google) 伴随论文 [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) 由 Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch 发布。 +1. **[Deformable DETR](https://huggingface.co/docs/transformers/main/model_doc/deformable_detr)** (来自 SenseTime Research) 伴随论文 [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) 由 Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, Jifeng Dai 发布。 1. **[DeiT](https://huggingface.co/docs/transformers/model_doc/deit)** (来自 Facebook) 伴随论文 [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) 由 Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou 发布。 1. **[DETR](https://huggingface.co/docs/transformers/model_doc/detr)** (来自 Facebook) 伴随论文 [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) 由 Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko 发布。 1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (来自 Microsoft Research) 伴随论文 [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) 由 Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index 196f4741848371..2ef861d0592358 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -273,6 +273,7 @@ conda install -c huggingface transformers 1. **[DeBERTa](https://huggingface.co/docs/transformers/model_doc/deberta)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. 1. **[DeBERTa-v2](https://huggingface.co/docs/transformers/model_doc/deberta-v2)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. 1. **[Decision Transformer](https://huggingface.co/docs/transformers/model_doc/decision_transformer)** (from Berkeley/Facebook/Google) released with the paper [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) by Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch. +1. **[Deformable DETR](https://huggingface.co/docs/transformers/main/model_doc/deformable_detr)** (from SenseTime Research) released with the paper [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) by Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, Jifeng Dai. 1. **[DeiT](https://huggingface.co/docs/transformers/model_doc/deit)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou. 1. **[DETR](https://huggingface.co/docs/transformers/model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko. 1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan. diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index c4c5f2162aba3c..b59fdfbc46d91a 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -364,6 +364,8 @@ title: ConvNeXT - local: model_doc/cvt title: CvT + - local: model_doc/deformable_detr + title: Deformable DETR - local: model_doc/deit title: DeiT - local: model_doc/detr diff --git a/docs/source/en/index.mdx b/docs/source/en/index.mdx index fb2ff2d418229b..265fe39f25fdb9 100644 --- a/docs/source/en/index.mdx +++ b/docs/source/en/index.mdx @@ -77,6 +77,7 @@ The documentation is organized into five sections: 1. **[DeBERTa](model_doc/deberta)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. 1. **[DeBERTa-v2](model_doc/deberta-v2)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. 1. **[Decision Transformer](model_doc/decision_transformer)** (from Berkeley/Facebook/Google) released with the paper [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) by Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch. +1. **[Deformable DETR](model_doc/deformable_detr)** (from SenseTime Research) released with the paper [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) by Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, Jifeng Dai. 1. **[DeiT](model_doc/deit)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou. 1. **[DETR](model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko. 1. **[DialoGPT](model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan. @@ -223,6 +224,7 @@ Flax), PyTorch, and/or TensorFlow. | DeBERTa | ✅ | ✅ | ✅ | ✅ | ❌ | | DeBERTa-v2 | ✅ | ✅ | ✅ | ✅ | ❌ | | Decision Transformer | ❌ | ❌ | ✅ | ❌ | ❌ | +| Deformable DETR | ❌ | ❌ | ✅ | ❌ | ❌ | | DeiT | ❌ | ❌ | ✅ | ✅ | ❌ | | DETR | ❌ | ❌ | ✅ | ❌ | ❌ | | DistilBERT | ✅ | ✅ | ✅ | ✅ | ✅ | diff --git a/docs/source/en/model_doc/deformable_detr.mdx b/docs/source/en/model_doc/deformable_detr.mdx new file mode 100644 index 00000000000000..7997b2f19d2e82 --- /dev/null +++ b/docs/source/en/model_doc/deformable_detr.mdx @@ -0,0 +1,50 @@ + + +# Deformable DETR + +## Overview + +The Deformable DETR model was proposed in [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) by Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, Jifeng Dai. +Deformable DETR mitigates the slow convergence issues and limited feature spatial resolution of the original [DETR](detr) by leveraging a new deformable attention module which only attends to a small set of key sampling points around a reference. + +The abstract from the paper is the following: + +*DETR has been recently proposed to eliminate the need for many hand-designed components in object detection while demonstrating good performance. However, it suffers from slow convergence and limited feature spatial resolution, due to the limitation of Transformer attention modules in processing image feature maps. To mitigate these issues, we proposed Deformable DETR, whose attention modules only attend to a small set of key sampling points around a reference. Deformable DETR can achieve better performance than DETR (especially on small objects) with 10 times less training epochs. Extensive experiments on the COCO benchmark demonstrate the effectiveness of our approach.* + +Tips: + +- One can use the [`AutoFeatureExtractor`] API to prepare images (and optional targets) for the model. This will instantiate a [`DetrFeatureExtractor`] behind the scenes. +- Training Deformable DETR is equivalent to training the original [DETR](detr) model. Demo notebooks can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/DETR). + + + + Deformable DETR architecture. Taken from the original paper. + +This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/fundamentalvision/Deformable-DETR). + +## DeformableDetrConfig + +[[autodoc]] DeformableDetrConfig + + +## DeformableDetrModel + +[[autodoc]] DeformableDetrModel + - forward + + +## DeformableDetrForObjectDetection + +[[autodoc]] DeformableDetrForObjectDetection + - forward \ No newline at end of file diff --git a/setup.py b/setup.py index 3145272ef6f061..84bd8f5d6eef50 100644 --- a/setup.py +++ b/setup.py @@ -411,6 +411,7 @@ def run(self): url="https://github.com/huggingface/transformers", package_dir={"": "src"}, packages=find_packages("src"), + package_data={"transformers": ["py.typed", "*.cu", "*.cpp", "*.cuh", "*.h"]}, zip_safe=False, extras_require=extras, entry_points={"console_scripts": ["transformers-cli=transformers.commands.transformers_cli:main"]}, diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index ee23c79db6b487..2671b37d8ebd60 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -187,6 +187,7 @@ "models.deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaTokenizer"], "models.deberta_v2": ["DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaV2Config"], "models.decision_transformer": ["DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "DecisionTransformerConfig"], + "models.deformable_detr": ["DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeformableDetrConfig"], "models.deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig"], "models.detr": ["DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "DetrConfig"], "models.dialogpt": [], @@ -682,12 +683,20 @@ if not (is_timm_available() and is_vision_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from .utils import dummy_timm_objects + from .utils import dummy_timm_and_vision_objects - _import_structure["utils.dummy_timm_objects"] = [ - name for name in dir(dummy_timm_objects) if not name.startswith("_") + _import_structure["utils.dummy_timm_and_vision_objects"] = [ + name for name in dir(dummy_timm_and_vision_objects) if not name.startswith("_") ] else: + _import_structure["models.deformable_detr"].extend( + [ + "DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", + "DeformableDetrForObjectDetection", + "DeformableDetrModel", + "DeformableDetrPreTrainedModel", + ] + ) _import_structure["models.detr"].extend( [ "DETR_PRETRAINED_MODEL_ARCHIVE_LIST", @@ -3072,6 +3081,7 @@ DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, DecisionTransformerConfig, ) + from .models.deformable_detr import DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, DeformableDetrConfig from .models.deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig from .models.detr import DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, DetrConfig from .models.distilbert import DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertTokenizer @@ -3502,8 +3512,14 @@ if not (is_timm_available() and is_vision_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: - from .utils.dummy_timm_objects import * + from .utils.dummy_timm_and_vision_objects import * else: + from .models.deformable_detr import ( + DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, + DeformableDetrForObjectDetection, + DeformableDetrModel, + DeformableDetrPreTrainedModel, + ) from .models.detr import ( DETR_PRETRAINED_MODEL_ARCHIVE_LIST, DetrForObjectDetection, diff --git a/src/transformers/models/__init__.py b/src/transformers/models/__init__.py index 0e2bad475a142e..6a206bb9684235 100644 --- a/src/transformers/models/__init__.py +++ b/src/transformers/models/__init__.py @@ -47,6 +47,7 @@ deberta, deberta_v2, decision_transformer, + deformable_detr, deit, detr, dialogpt, diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index 785518fbcd0817..ae0e88bd4a1e04 100644 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -53,6 +53,7 @@ ("deberta", "DebertaConfig"), ("deberta-v2", "DebertaV2Config"), ("decision_transformer", "DecisionTransformerConfig"), + ("deformable_detr", "DeformableDetrConfig"), ("deit", "DeiTConfig"), ("detr", "DetrConfig"), ("distilbert", "DistilBertConfig"), @@ -182,6 +183,7 @@ ("data2vec-vision", "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("deberta", "DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("deberta-v2", "DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("deformable_detr", "DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("deit", "DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("detr", "DETR_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("distilbert", "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), @@ -307,6 +309,7 @@ ("deberta", "DeBERTa"), ("deberta-v2", "DeBERTa-v2"), ("decision_transformer", "Decision Transformer"), + ("deformable_detr", "Deformable DETR"), ("deit", "DeiT"), ("detr", "DETR"), ("dialogpt", "DialoGPT"), diff --git a/src/transformers/models/auto/feature_extraction_auto.py b/src/transformers/models/auto/feature_extraction_auto.py index 625b79db06494e..015fd132ef0dc2 100644 --- a/src/transformers/models/auto/feature_extraction_auto.py +++ b/src/transformers/models/auto/feature_extraction_auto.py @@ -43,6 +43,7 @@ ("cvt", "ConvNextFeatureExtractor"), ("data2vec-audio", "Wav2Vec2FeatureExtractor"), ("data2vec-vision", "BeitFeatureExtractor"), + ("deformable_detr", "DetrFeatureExtractor"), ("deit", "DeiTFeatureExtractor"), ("detr", "DetrFeatureExtractor"), ("detr", "DetrFeatureExtractor"), diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index 9423692c34a510..9edfae0c89be85 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -53,6 +53,7 @@ ("deberta-v2", "DebertaV2Model"), ("decision_transformer", "DecisionTransformerModel"), ("decision_transformer_gpt2", "DecisionTransformerGPT2Model"), + ("deformable_detr", "DeformableDetrModel"), ("deit", "DeiTModel"), ("detr", "DetrModel"), ("distilbert", "DistilBertModel"), @@ -451,6 +452,7 @@ MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES = OrderedDict( [ # Model for Object Detection mapping + ("deformable_detr", "DeformableDetrForObjectDetection"), ("detr", "DetrForObjectDetection"), ("yolos", "YolosForObjectDetection"), ] diff --git a/src/transformers/models/deformable_detr/__init__.py b/src/transformers/models/deformable_detr/__init__.py new file mode 100644 index 00000000000000..f70d937c7ff468 --- /dev/null +++ b/src/transformers/models/deformable_detr/__init__.py @@ -0,0 +1,61 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_timm_available + + +_import_structure = { + "configuration_deformable_detr": ["DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeformableDetrConfig"], +} + +try: + if not is_timm_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_deformable_detr"] = [ + "DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", + "DeformableDetrForObjectDetection", + "DeformableDetrModel", + "DeformableDetrPreTrainedModel", + ] + + +if TYPE_CHECKING: + from .configuration_deformable_detr import DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, DeformableDetrConfig + + try: + if not is_timm_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_deformable_detr import ( + DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, + DeformableDetrForObjectDetection, + DeformableDetrModel, + DeformableDetrPreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/src/transformers/models/deformable_detr/configuration_deformable_detr.py b/src/transformers/models/deformable_detr/configuration_deformable_detr.py new file mode 100644 index 00000000000000..bded5233037c2a --- /dev/null +++ b/src/transformers/models/deformable_detr/configuration_deformable_detr.py @@ -0,0 +1,227 @@ +# coding=utf-8 +# Copyright 2022 SenseTime and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Deformable DETR model configuration""" + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + +DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json", + # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr +} + + +class DeformableDetrConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`DeformableDetrModel`]. It is used to instantiate + a Deformable DETR model according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the Deformable DETR + [SenseTime/deformable-detr](https://huggingface.co/SenseTime/deformable-detr) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + num_queries (`int`, *optional*, defaults to 300): + Number of object queries, i.e. detection slots. This is the maximal number of objects + [`DeformableDetrModel`] can detect in a single image. In case `two_stage` is set to `True`, we use + `two_stage_num_proposals` instead. + d_model (`int`, *optional*, defaults to 256): + Dimension of the layers. + encoder_layers (`int`, *optional*, defaults to 6): + Number of encoder layers. + decoder_layers (`int`, *optional*, defaults to 6): + Number of decoder layers. + encoder_attention_heads (`int`, *optional*, defaults to 8): + Number of attention heads for each attention layer in the Transformer encoder. + decoder_attention_heads (`int`, *optional*, defaults to 8): + Number of attention heads for each attention layer in the Transformer decoder. + decoder_ffn_dim (`int`, *optional*, defaults to 1024): + Dimension of the "intermediate" (often named feed-forward) layer in decoder. + encoder_ffn_dim (`int`, *optional*, defaults to 1024): + Dimension of the "intermediate" (often named feed-forward) layer in decoder. + activation_function (`str` or `function`, *optional*, defaults to `"relu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + dropout (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + activation_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for activations inside the fully connected layer. + init_std (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + init_xavier_std (`float`, *optional*, defaults to 1): + The scaling factor used for the Xavier initialization gain in the HM Attention map module. + encoder_layerdrop: (`float`, *optional*, defaults to 0.0): + The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) + for more details. + decoder_layerdrop: (`float`, *optional*, defaults to 0.0): + The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) + for more details. + auxiliary_loss (`bool`, *optional*, defaults to `False`): + Whether auxiliary decoding losses (loss at each decoder layer) are to be used. + position_embedding_type (`str`, *optional*, defaults to `"sine"`): + Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`. + backbone (`str`, *optional*, defaults to `"resnet50"`): + Name of convolutional backbone to use. Supports any convolutional backbone from the timm package. For a + list of all available models, see [this + page](https://rwightman.github.io/pytorch-image-models/#load-a-pretrained-model). + dilation (`bool`, *optional*, defaults to `False`): + Whether to replace stride with dilation in the last convolutional block (DC5). + class_cost (`float`, *optional*, defaults to 1): + Relative weight of the classification error in the Hungarian matching cost. + bbox_cost (`float`, *optional*, defaults to 5): + Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost. + giou_cost (`float`, *optional*, defaults to 2): + Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost. + mask_loss_coefficient (`float`, *optional*, defaults to 1): + Relative weight of the Focal loss in the panoptic segmentation loss. + dice_loss_coefficient (`float`, *optional*, defaults to 1): + Relative weight of the DICE/F-1 loss in the panoptic segmentation loss. + bbox_loss_coefficient (`float`, *optional*, defaults to 5): + Relative weight of the L1 bounding box loss in the object detection loss. + giou_loss_coefficient (`float`, *optional*, defaults to 2): + Relative weight of the generalized IoU loss in the object detection loss. + eos_coefficient (`float`, *optional*, defaults to 0.1): + Relative classification weight of the 'no-object' class in the object detection loss. + num_feature_levels (`int`, *optional*, defaults to 4): + The number of input feature levels. + encoder_n_points (`int`, *optional*, defaults to 4): + The number of sampled keys in each feature level for each attention head in the encoder. + decoder_n_points (`int`, *optional*, defaults to 4): + The number of sampled keys in each feature level for each attention head in the decoder. + two_stage (`bool`, *optional*, defaults to `False`): + Whether to apply a two-stage deformable DETR, where the region proposals are also generated by a variant of + Deformable DETR, which are further fed into the decoder for iterative bounding box refinement. + two_stage_num_proposals (`int`, *optional*, defaults to 300): + The number of region proposals to be generated, in case `two_stage` is set to `True`. + with_box_refine (`bool`, *optional*, defaults to `False`): + Whether to apply iterative bounding box refinement, where each decoder layer refines the bounding boxes + based on the predictions from the previous layer. + + Examples: + + ```python + >>> from transformers import DeformableDetrModel, DeformableDetrConfig + + >>> # Initializing a Deformable DETR SenseTime/deformable-detr style configuration + >>> configuration = DeformableDetrConfig() + + >>> # Initializing a model from the SenseTime/deformable-detr style configuration + >>> model = DeformableDetrModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + model_type = "deformable_detr" + attribute_map = { + "hidden_size": "d_model", + "num_attention_heads": "encoder_attention_heads", + } + + def __init__( + self, + num_queries=300, + max_position_embeddings=1024, + encoder_layers=6, + encoder_ffn_dim=1024, + encoder_attention_heads=8, + decoder_layers=6, + decoder_ffn_dim=1024, + decoder_attention_heads=8, + encoder_layerdrop=0.0, + decoder_layerdrop=0.0, + is_encoder_decoder=True, + activation_function="relu", + d_model=256, + dropout=0.1, + attention_dropout=0.0, + activation_dropout=0.0, + init_std=0.02, + init_xavier_std=1.0, + return_intermediate=True, + auxiliary_loss=False, + position_embedding_type="sine", + backbone="resnet50", + dilation=False, + num_feature_levels=4, + encoder_n_points=4, + decoder_n_points=4, + two_stage=False, + two_stage_num_proposals=300, + with_box_refine=False, + class_cost=1, + bbox_cost=5, + giou_cost=2, + mask_loss_coefficient=1, + dice_loss_coefficient=1, + bbox_loss_coefficient=5, + giou_loss_coefficient=2, + eos_coefficient=0.1, + **kwargs + ): + self.num_queries = num_queries + self.max_position_embeddings = max_position_embeddings + self.d_model = d_model + self.encoder_ffn_dim = encoder_ffn_dim + self.encoder_layers = encoder_layers + self.encoder_attention_heads = encoder_attention_heads + self.decoder_ffn_dim = decoder_ffn_dim + self.decoder_layers = decoder_layers + self.decoder_attention_heads = decoder_attention_heads + self.dropout = dropout + self.attention_dropout = attention_dropout + self.activation_dropout = activation_dropout + self.activation_function = activation_function + self.init_std = init_std + self.init_xavier_std = init_xavier_std + self.encoder_layerdrop = encoder_layerdrop + self.decoder_layerdrop = decoder_layerdrop + self.auxiliary_loss = auxiliary_loss + self.position_embedding_type = position_embedding_type + self.backbone = backbone + self.dilation = dilation + # deformable attributes + self.num_feature_levels = num_feature_levels + self.encoder_n_points = encoder_n_points + self.decoder_n_points = decoder_n_points + self.two_stage = two_stage + self.two_stage_num_proposals = two_stage_num_proposals + self.with_box_refine = with_box_refine + if two_stage is True and with_box_refine is False: + raise ValueError("If two_stage is True, with_box_refine must be True.") + # Hungarian matcher + self.class_cost = class_cost + self.bbox_cost = bbox_cost + self.giou_cost = giou_cost + # Loss coefficients + self.mask_loss_coefficient = mask_loss_coefficient + self.dice_loss_coefficient = dice_loss_coefficient + self.bbox_loss_coefficient = bbox_loss_coefficient + self.giou_loss_coefficient = giou_loss_coefficient + self.eos_coefficient = eos_coefficient + super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) + + @property + def num_attention_heads(self) -> int: + return self.encoder_attention_heads + + @property + def hidden_size(self) -> int: + return self.d_model diff --git a/src/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py b/src/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py new file mode 100644 index 00000000000000..85ee723fb33968 --- /dev/null +++ b/src/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py @@ -0,0 +1,237 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Convert Deformable DETR checkpoints.""" + + +import argparse +import json +from pathlib import Path + +import torch +from PIL import Image + +import requests +from huggingface_hub import cached_download, hf_hub_url +from transformers import DeformableDetrConfig, DeformableDetrForObjectDetection, DetrFeatureExtractor +from transformers.utils import logging + + +logging.set_verbosity_info() +logger = logging.get_logger(__name__) + + +def rename_key(orig_key): + if "backbone.0.body" in orig_key: + orig_key = orig_key.replace("backbone.0.body", "backbone.conv_encoder.model") + if "transformer" in orig_key: + orig_key = orig_key.replace("transformer.", "") + if "norm1" in orig_key: + if "encoder" in orig_key: + orig_key = orig_key.replace("norm1", "self_attn_layer_norm") + else: + orig_key = orig_key.replace("norm1", "encoder_attn_layer_norm") + if "norm2" in orig_key: + if "encoder" in orig_key: + orig_key = orig_key.replace("norm2", "final_layer_norm") + else: + orig_key = orig_key.replace("norm2", "self_attn_layer_norm") + if "norm3" in orig_key: + orig_key = orig_key.replace("norm3", "final_layer_norm") + if "linear1" in orig_key: + orig_key = orig_key.replace("linear1", "fc1") + if "linear2" in orig_key: + orig_key = orig_key.replace("linear2", "fc2") + if "query_embed" in orig_key: + orig_key = orig_key.replace("query_embed", "query_position_embeddings") + if "cross_attn" in orig_key: + orig_key = orig_key.replace("cross_attn", "encoder_attn") + + return orig_key + + +def read_in_q_k_v(state_dict): + # transformer decoder self-attention layers + for i in range(6): + # read in weights + bias of input projection layer of self-attention + in_proj_weight = state_dict.pop(f"decoder.layers.{i}.self_attn.in_proj_weight") + in_proj_bias = state_dict.pop(f"decoder.layers.{i}.self_attn.in_proj_bias") + # next, add query, keys and values (in that order) to the state dict + state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :] + state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256] + state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :] + state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512] + state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :] + state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:] + + +# We will verify our results on an image of cute cats +def prepare_img(): + url = "http://images.cocodataset.org/val2017/000000039769.jpg" + im = Image.open(requests.get(url, stream=True).raw) + + return im + + +@torch.no_grad() +def convert_deformable_detr_checkpoint( + checkpoint_path, + single_scale, + dilation, + with_box_refine, + two_stage, + pytorch_dump_folder_path, + push_to_hub, +): + """ + Copy/paste/tweak model's weights to our Deformable DETR structure. + """ + + # load default config + config = DeformableDetrConfig() + # set config attributes + if single_scale: + config.num_feature_levels = 1 + config.dilation = dilation + config.with_box_refine = with_box_refine + config.two_stage = two_stage + # set labels + config.num_labels = 91 + repo_id = "datasets/huggingface/label-files" + filename = "coco-detection-id2label.json" + id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename)), "r")) + id2label = {int(k): v for k, v in id2label.items()} + config.id2label = id2label + config.label2id = {v: k for k, v in id2label.items()} + + # load feature extractor + feature_extractor = DetrFeatureExtractor(format="coco_detection") + + # prepare image + img = prepare_img() + encoding = feature_extractor(images=img, return_tensors="pt") + pixel_values = encoding["pixel_values"] + + logger.info("Converting model...") + + # load original state dict + state_dict = torch.load(checkpoint_path, map_location="cpu")["model"] + # rename keys + for key in state_dict.copy().keys(): + val = state_dict.pop(key) + state_dict[rename_key(key)] = val + # query, key and value matrices need special treatment + read_in_q_k_v(state_dict) + # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them + prefix = "model." + for key in state_dict.copy().keys(): + if not key.startswith("class_embed") and not key.startswith("bbox_embed"): + val = state_dict.pop(key) + state_dict[prefix + key] = val + # finally, create HuggingFace model and load state dict + model = DeformableDetrForObjectDetection(config) + model.load_state_dict(state_dict) + model.eval() + + device = "cuda" if torch.cuda.is_available() else "cpu" + model.to(device) + # verify our conversion + outputs = model(pixel_values.to(device)) + + expected_logits = torch.tensor( + [[-9.6645, -4.3449, -5.8705], [-9.7035, -3.8504, -5.0724], [-10.5634, -5.3379, -7.5116]] + ) + expected_boxes = torch.tensor([[0.8693, 0.2289, 0.2492], [0.3150, 0.5489, 0.5845], [0.5563, 0.7580, 0.8518]]) + + if single_scale: + expected_logits = torch.tensor( + [[-9.9051, -4.2541, -6.4852], [-9.6947, -4.0854, -6.8033], [-10.0665, -5.8470, -7.7003]] + ) + expected_boxes = torch.tensor([[0.7292, 0.4991, 0.5532], [0.7959, 0.2426, 0.4236], [0.7582, 0.3518, 0.4451]]) + + if single_scale and dilation: + expected_logits = torch.tensor( + [[-8.9652, -4.1074, -5.6635], [-9.0596, -4.9447, -6.6075], [-10.1178, -4.5275, -6.2671]] + ) + expected_boxes = torch.tensor([[0.7665, 0.4130, 0.4769], [0.8364, 0.1841, 0.3391], [0.6261, 0.3895, 0.7978]]) + + if with_box_refine: + expected_logits = torch.tensor( + [[-8.8895, -5.4187, -6.8153], [-8.4706, -6.1668, -7.6184], [-9.0042, -5.5359, -6.9141]] + ) + expected_boxes = torch.tensor([[0.7828, 0.2208, 0.4323], [0.0892, 0.5996, 0.1319], [0.5524, 0.6389, 0.8914]]) + + if with_box_refine and two_stage: + expected_logits = torch.tensor( + [[-6.7108, -4.3213, -6.3777], [-8.9014, -6.1799, -6.7240], [-6.9315, -4.4735, -6.2298]] + ) + expected_boxes = torch.tensor([[0.2583, 0.5499, 0.4683], [0.7652, 0.9068, 0.4882], [0.5490, 0.2763, 0.0564]]) + + print("Logits:", outputs.logits[0, :3, :3]) + + assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4) + assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4) + + print("Everything ok!") + + # Save model and feature extractor + logger.info(f"Saving PyTorch model and feature extractor to {pytorch_dump_folder_path}...") + Path(pytorch_dump_folder_path).mkdir(exist_ok=True) + model.save_pretrained(pytorch_dump_folder_path) + feature_extractor.save_pretrained(pytorch_dump_folder_path) + + # Push to hub + if push_to_hub: + model_name = "deformable-detr" + model_name += "-single-scale" if single_scale else "" + model_name += "-dc5" if dilation else "" + model_name += "-with-box-refine" if with_box_refine else "" + model_name += "-two-stage" if two_stage else "" + print("Pushing model to hub...") + model.push_to_hub(repo_path_or_name=model_name, organization="nielsr", commit_message="Add model") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + + parser.add_argument( + "--checkpoint_path", + type=str, + default="/home/niels/checkpoints/deformable_detr/r50_deformable_detr-checkpoint.pth", + help="Path to Pytorch checkpoint (.pth file) you'd like to convert.", + ) + parser.add_argument("--single_scale", action="store_true", help="Whether to set config.num_features_levels = 1.") + parser.add_argument("--dilation", action="store_true", help="Whether to set config.dilation=True.") + parser.add_argument("--with_box_refine", action="store_true", help="Whether to set config.with_box_refine=True.") + parser.add_argument("--two_stage", action="store_true", help="Whether to set config.two_stage=True.") + parser.add_argument( + "--pytorch_dump_folder_path", + default=None, + type=str, + required=True, + help="Path to the folder to output PyTorch model.", + ) + parser.add_argument( + "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." + ) + args = parser.parse_args() + convert_deformable_detr_checkpoint( + args.checkpoint_path, + args.single_scale, + args.dilation, + args.with_box_refine, + args.two_stage, + args.pytorch_dump_folder_path, + args.push_to_hub, + ) diff --git a/src/transformers/models/deformable_detr/custom_kernel/cpu/ms_deform_attn_cpu.cpp b/src/transformers/models/deformable_detr/custom_kernel/cpu/ms_deform_attn_cpu.cpp new file mode 100644 index 00000000000000..388a73d22d4c9b --- /dev/null +++ b/src/transformers/models/deformable_detr/custom_kernel/cpu/ms_deform_attn_cpu.cpp @@ -0,0 +1,40 @@ +/*! +************************************************************************************************** +* Deformable DETR +* Copyright (c) 2020 SenseTime. All Rights Reserved. +* Licensed under the Apache License, Version 2.0 [see LICENSE for details] +************************************************************************************************** +* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#include + +#include +#include + + +at::Tensor +ms_deform_attn_cpu_forward( + const at::Tensor &value, + const at::Tensor &spatial_shapes, + const at::Tensor &level_start_index, + const at::Tensor &sampling_loc, + const at::Tensor &attn_weight, + const int im2col_step) +{ + AT_ERROR("Not implement on cpu"); +} + +std::vector +ms_deform_attn_cpu_backward( + const at::Tensor &value, + const at::Tensor &spatial_shapes, + const at::Tensor &level_start_index, + const at::Tensor &sampling_loc, + const at::Tensor &attn_weight, + const at::Tensor &grad_output, + const int im2col_step) +{ + AT_ERROR("Not implement on cpu"); +} diff --git a/src/transformers/models/deformable_detr/custom_kernel/cpu/ms_deform_attn_cpu.h b/src/transformers/models/deformable_detr/custom_kernel/cpu/ms_deform_attn_cpu.h new file mode 100644 index 00000000000000..7eac8c8bcd1bf5 --- /dev/null +++ b/src/transformers/models/deformable_detr/custom_kernel/cpu/ms_deform_attn_cpu.h @@ -0,0 +1,32 @@ +/*! +************************************************************************************************** +* Deformable DETR +* Copyright (c) 2020 SenseTime. All Rights Reserved. +* Licensed under the Apache License, Version 2.0 [see LICENSE for details] +************************************************************************************************** +* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#pragma once +#include + +at::Tensor +ms_deform_attn_cpu_forward( + const at::Tensor &value, + const at::Tensor &spatial_shapes, + const at::Tensor &level_start_index, + const at::Tensor &sampling_loc, + const at::Tensor &attn_weight, + const int im2col_step); + +std::vector +ms_deform_attn_cpu_backward( + const at::Tensor &value, + const at::Tensor &spatial_shapes, + const at::Tensor &level_start_index, + const at::Tensor &sampling_loc, + const at::Tensor &attn_weight, + const at::Tensor &grad_output, + const int im2col_step); + diff --git a/src/transformers/models/deformable_detr/custom_kernel/cuda/ms_deform_attn_cuda.cu b/src/transformers/models/deformable_detr/custom_kernel/cuda/ms_deform_attn_cuda.cu new file mode 100644 index 00000000000000..8ea1d7fabe2684 --- /dev/null +++ b/src/transformers/models/deformable_detr/custom_kernel/cuda/ms_deform_attn_cuda.cu @@ -0,0 +1,156 @@ +/*! +************************************************************************************************** +* Deformable DETR +* Copyright (c) 2020 SenseTime. All Rights Reserved. +* Licensed under the Apache License, Version 2.0 [see LICENSE for details] +************************************************************************************************** +* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#include +#include "cuda/ms_deform_im2col_cuda.cuh" + +#include +#include +#include +#include + +#pragma once +#include + + +at::Tensor ms_deform_attn_cuda_forward( + const at::Tensor &value, + const at::Tensor &spatial_shapes, + const at::Tensor &level_start_index, + const at::Tensor &sampling_loc, + const at::Tensor &attn_weight, + const int im2col_step) +{ + AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous"); + AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous"); + AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous"); + AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous"); + AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous"); + + AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor"); + AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor"); + AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor"); + AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor"); + AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor"); + + const int batch = value.size(0); + const int spatial_size = value.size(1); + const int num_heads = value.size(2); + const int channels = value.size(3); + + const int num_levels = spatial_shapes.size(0); + + const int num_query = sampling_loc.size(1); + const int num_point = sampling_loc.size(4); + + const int im2col_step_ = std::min(batch, im2col_step); + + AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_); + + auto output = at::zeros({batch, num_query, num_heads, channels}, value.options()); + + const int batch_n = im2col_step_; + auto output_n = output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels}); + auto per_value_size = spatial_size * num_heads * channels; + auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2; + auto per_attn_weight_size = num_query * num_heads * num_levels * num_point; + for (int n = 0; n < batch/im2col_step_; ++n) + { + auto columns = output_n.select(0, n); + AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_forward_cuda", ([&] { + ms_deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(), + value.data() + n * im2col_step_ * per_value_size, + spatial_shapes.data(), + level_start_index.data(), + sampling_loc.data() + n * im2col_step_ * per_sample_loc_size, + attn_weight.data() + n * im2col_step_ * per_attn_weight_size, + batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point, + columns.data()); + + })); + } + + output = output.view({batch, num_query, num_heads*channels}); + + return output; +} + + +std::vector ms_deform_attn_cuda_backward( + const at::Tensor &value, + const at::Tensor &spatial_shapes, + const at::Tensor &level_start_index, + const at::Tensor &sampling_loc, + const at::Tensor &attn_weight, + const at::Tensor &grad_output, + const int im2col_step) +{ + + AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous"); + AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous"); + AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous"); + AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous"); + AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous"); + AT_ASSERTM(grad_output.is_contiguous(), "grad_output tensor has to be contiguous"); + + AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor"); + AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor"); + AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor"); + AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor"); + AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor"); + AT_ASSERTM(grad_output.type().is_cuda(), "grad_output must be a CUDA tensor"); + + const int batch = value.size(0); + const int spatial_size = value.size(1); + const int num_heads = value.size(2); + const int channels = value.size(3); + + const int num_levels = spatial_shapes.size(0); + + const int num_query = sampling_loc.size(1); + const int num_point = sampling_loc.size(4); + + const int im2col_step_ = std::min(batch, im2col_step); + + AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_); + + auto grad_value = at::zeros_like(value); + auto grad_sampling_loc = at::zeros_like(sampling_loc); + auto grad_attn_weight = at::zeros_like(attn_weight); + + const int batch_n = im2col_step_; + auto per_value_size = spatial_size * num_heads * channels; + auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2; + auto per_attn_weight_size = num_query * num_heads * num_levels * num_point; + auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels}); + + for (int n = 0; n < batch/im2col_step_; ++n) + { + auto grad_output_g = grad_output_n.select(0, n); + AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_backward_cuda", ([&] { + ms_deformable_col2im_cuda(at::cuda::getCurrentCUDAStream(), + grad_output_g.data(), + value.data() + n * im2col_step_ * per_value_size, + spatial_shapes.data(), + level_start_index.data(), + sampling_loc.data() + n * im2col_step_ * per_sample_loc_size, + attn_weight.data() + n * im2col_step_ * per_attn_weight_size, + batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point, + grad_value.data() + n * im2col_step_ * per_value_size, + grad_sampling_loc.data() + n * im2col_step_ * per_sample_loc_size, + grad_attn_weight.data() + n * im2col_step_ * per_attn_weight_size); + + })); + } + + return { + grad_value, grad_sampling_loc, grad_attn_weight + }; +} diff --git a/src/transformers/models/deformable_detr/custom_kernel/cuda/ms_deform_attn_cuda.cuh b/src/transformers/models/deformable_detr/custom_kernel/cuda/ms_deform_attn_cuda.cuh new file mode 100644 index 00000000000000..34f8ae9cb77bba --- /dev/null +++ b/src/transformers/models/deformable_detr/custom_kernel/cuda/ms_deform_attn_cuda.cuh @@ -0,0 +1,1467 @@ +/*! +************************************************************************************************** +* Deformable DETR +* Copyright (c) 2020 SenseTime. All Rights Reserved. +* Licensed under the Apache License, Version 2.0 [see LICENSE for details] +************************************************************************************************** +* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#include + +#include +#include + +#include +#include +#include + +#include +#include + +#include + +#define CUDA_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ + i < (n); \ + i += blockDim.x * gridDim.x) + + +at::Tensor ms_deform_attn_cuda_forward( + const at::Tensor &value, + const at::Tensor &spatial_shapes, + const at::Tensor &level_start_index, + const at::Tensor &sampling_loc, + const at::Tensor &attn_weight, + const int im2col_step) +{ + AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous"); + AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous"); + AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous"); + AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous"); + AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous"); + + AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor"); + AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor"); + AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor"); + AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor"); + AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor"); + + const int batch = value.size(0); + const int spatial_size = value.size(1); + const int num_heads = value.size(2); + const int channels = value.size(3); + + const int num_levels = spatial_shapes.size(0); + + const int num_query = sampling_loc.size(1); + const int num_point = sampling_loc.size(4); + + const int im2col_step_ = std::min(batch, im2col_step); + + AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_); + + auto output = at::zeros({batch, num_query, num_heads, channels}, value.options()); + + const int batch_n = im2col_step_; + auto output_n = output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels}); + auto per_value_size = spatial_size * num_heads * channels; + auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2; + auto per_attn_weight_size = num_query * num_heads * num_levels * num_point; + for (int n = 0; n < batch/im2col_step_; ++n) + { + auto columns = output_n.select(0, n); + AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_forward_cuda", ([&] { + ms_deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(), + value.data() + n * im2col_step_ * per_value_size, + spatial_shapes.data(), + level_start_index.data(), + sampling_loc.data() + n * im2col_step_ * per_sample_loc_size, + attn_weight.data() + n * im2col_step_ * per_attn_weight_size, + batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point, + columns.data()); + + })); + } + + output = output.view({batch, num_query, num_heads*channels}); + + return output; +} + + +std::vector ms_deform_attn_cuda_backward( + const at::Tensor &value, + const at::Tensor &spatial_shapes, + const at::Tensor &level_start_index, + const at::Tensor &sampling_loc, + const at::Tensor &attn_weight, + const at::Tensor &grad_output, + const int im2col_step) +{ + + AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous"); + AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous"); + AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous"); + AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous"); + AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous"); + AT_ASSERTM(grad_output.is_contiguous(), "grad_output tensor has to be contiguous"); + + AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor"); + AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor"); + AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor"); + AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor"); + AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor"); + AT_ASSERTM(grad_output.type().is_cuda(), "grad_output must be a CUDA tensor"); + + const int batch = value.size(0); + const int spatial_size = value.size(1); + const int num_heads = value.size(2); + const int channels = value.size(3); + + const int num_levels = spatial_shapes.size(0); + + const int num_query = sampling_loc.size(1); + const int num_point = sampling_loc.size(4); + + const int im2col_step_ = std::min(batch, im2col_step); + + AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must divide im2col_step(%d)", batch, im2col_step_); + + auto grad_value = at::zeros_like(value); + auto grad_sampling_loc = at::zeros_like(sampling_loc); + auto grad_attn_weight = at::zeros_like(attn_weight); + + const int batch_n = im2col_step_; + auto per_value_size = spatial_size * num_heads * channels; + auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2; + auto per_attn_weight_size = num_query * num_heads * num_levels * num_point; + auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels}); + + for (int n = 0; n < batch/im2col_step_; ++n) + { + auto grad_output_g = grad_output_n.select(0, n); + AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_backward_cuda", ([&] { + ms_deformable_col2im_cuda(at::cuda::getCurrentCUDAStream(), + grad_output_g.data(), + value.data() + n * im2col_step_ * per_value_size, + spatial_shapes.data(), + level_start_index.data(), + sampling_loc.data() + n * im2col_step_ * per_sample_loc_size, + attn_weight.data() + n * im2col_step_ * per_attn_weight_size, + batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point, + grad_value.data() + n * im2col_step_ * per_value_size, + grad_sampling_loc.data() + n * im2col_step_ * per_sample_loc_size, + grad_attn_weight.data() + n * im2col_step_ * per_attn_weight_size); + + })); + } + + return { + grad_value, grad_sampling_loc, grad_attn_weight + }; +} + +const int CUDA_NUM_THREADS = 1024; +inline int GET_BLOCKS(const int N, const int num_threads) +{ + return (N + num_threads - 1) / num_threads; +} + + +template +__device__ scalar_t ms_deform_attn_im2col_bilinear(const scalar_t* &bottom_data, + const int &height, const int &width, const int &nheads, const int &channels, + const scalar_t &h, const scalar_t &w, const int &m, const int &c) +{ + const int h_low = floor(h); + const int w_low = floor(w); + const int h_high = h_low + 1; + const int w_high = w_low + 1; + + const scalar_t lh = h - h_low; + const scalar_t lw = w - w_low; + const scalar_t hh = 1 - lh, hw = 1 - lw; + + const int w_stride = nheads * channels; + const int h_stride = width * w_stride; + const int h_low_ptr_offset = h_low * h_stride; + const int h_high_ptr_offset = h_low_ptr_offset + h_stride; + const int w_low_ptr_offset = w_low * w_stride; + const int w_high_ptr_offset = w_low_ptr_offset + w_stride; + const int base_ptr = m * channels + c; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + { + const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr; + v1 = bottom_data[ptr1]; + } + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + { + const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr; + v2 = bottom_data[ptr2]; + } + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + { + const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr; + v3 = bottom_data[ptr3]; + } + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + } + + const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + + +template +__device__ void ms_deform_attn_col2im_bilinear(const scalar_t* &bottom_data, + const int &height, const int &width, const int &nheads, const int &channels, + const scalar_t &h, const scalar_t &w, const int &m, const int &c, + const scalar_t &top_grad, + const scalar_t &attn_weight, + scalar_t* &grad_value, + scalar_t* grad_sampling_loc, + scalar_t* grad_attn_weight) +{ + const int h_low = floor(h); + const int w_low = floor(w); + const int h_high = h_low + 1; + const int w_high = w_low + 1; + + const scalar_t lh = h - h_low; + const scalar_t lw = w - w_low; + const scalar_t hh = 1 - lh, hw = 1 - lw; + + const int w_stride = nheads * channels; + const int h_stride = width * w_stride; + const int h_low_ptr_offset = h_low * h_stride; + const int h_high_ptr_offset = h_low_ptr_offset + h_stride; + const int w_low_ptr_offset = w_low * w_stride; + const int w_high_ptr_offset = w_low_ptr_offset + w_stride; + const int base_ptr = m * channels + c; + + const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + const scalar_t top_grad_value = top_grad * attn_weight; + scalar_t grad_h_weight = 0, grad_w_weight = 0; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + { + const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr; + v1 = bottom_data[ptr1]; + grad_h_weight -= hw * v1; + grad_w_weight -= hh * v1; + atomicAdd(grad_value+ptr1, w1*top_grad_value); + } + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + { + const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr; + v2 = bottom_data[ptr2]; + grad_h_weight -= lw * v2; + grad_w_weight += hh * v2; + atomicAdd(grad_value+ptr2, w2*top_grad_value); + } + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + { + const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr; + v3 = bottom_data[ptr3]; + grad_h_weight += hw * v3; + grad_w_weight -= lh * v3; + atomicAdd(grad_value+ptr3, w3*top_grad_value); + } + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + grad_h_weight += lw * v4; + grad_w_weight += lh * v4; + atomicAdd(grad_value+ptr4, w4*top_grad_value); + } + + const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + *grad_attn_weight = top_grad * val; + *grad_sampling_loc = width * grad_w_weight * top_grad_value; + *(grad_sampling_loc + 1) = height * grad_h_weight * top_grad_value; +} + + +template +__device__ void ms_deform_attn_col2im_bilinear_gm(const scalar_t* &bottom_data, + const int &height, const int &width, const int &nheads, const int &channels, + const scalar_t &h, const scalar_t &w, const int &m, const int &c, + const scalar_t &top_grad, + const scalar_t &attn_weight, + scalar_t* &grad_value, + scalar_t* grad_sampling_loc, + scalar_t* grad_attn_weight) +{ + const int h_low = floor(h); + const int w_low = floor(w); + const int h_high = h_low + 1; + const int w_high = w_low + 1; + + const scalar_t lh = h - h_low; + const scalar_t lw = w - w_low; + const scalar_t hh = 1 - lh, hw = 1 - lw; + + const int w_stride = nheads * channels; + const int h_stride = width * w_stride; + const int h_low_ptr_offset = h_low * h_stride; + const int h_high_ptr_offset = h_low_ptr_offset + h_stride; + const int w_low_ptr_offset = w_low * w_stride; + const int w_high_ptr_offset = w_low_ptr_offset + w_stride; + const int base_ptr = m * channels + c; + + const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + const scalar_t top_grad_value = top_grad * attn_weight; + scalar_t grad_h_weight = 0, grad_w_weight = 0; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + { + const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr; + v1 = bottom_data[ptr1]; + grad_h_weight -= hw * v1; + grad_w_weight -= hh * v1; + atomicAdd(grad_value+ptr1, w1*top_grad_value); + } + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + { + const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr; + v2 = bottom_data[ptr2]; + grad_h_weight -= lw * v2; + grad_w_weight += hh * v2; + atomicAdd(grad_value+ptr2, w2*top_grad_value); + } + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + { + const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr; + v3 = bottom_data[ptr3]; + grad_h_weight += hw * v3; + grad_w_weight -= lh * v3; + atomicAdd(grad_value+ptr3, w3*top_grad_value); + } + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + grad_h_weight += lw * v4; + grad_w_weight += lh * v4; + atomicAdd(grad_value+ptr4, w4*top_grad_value); + } + + const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + atomicAdd(grad_attn_weight, top_grad * val); + atomicAdd(grad_sampling_loc, width * grad_w_weight * top_grad_value); + atomicAdd(grad_sampling_loc + 1, height * grad_h_weight * top_grad_value); +} + + +template +__global__ void ms_deformable_im2col_gpu_kernel(const int n, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *data_col) +{ + CUDA_KERNEL_LOOP(index, n) + { + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + scalar_t *data_col_ptr = data_col + index; + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + scalar_t col = 0; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const scalar_t *data_value_ptr = data_value + (data_value_ptr_init_offset + level_start_id * qid_stride); + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + col += ms_deform_attn_im2col_bilinear(data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col) * weight; + } + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + } + } + *data_col_ptr = col; + } +} + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2]; + __shared__ scalar_t cache_grad_attn_weight[blockSize]; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight+threadIdx.x)=0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x); + } + + __syncthreads(); + if (tid == 0) + { + scalar_t _grad_w=cache_grad_sampling_loc[0], _grad_h=cache_grad_sampling_loc[1], _grad_a=cache_grad_attn_weight[0]; + int sid=2; + for (unsigned int tid = 1; tid < blockSize; ++tid) + { + _grad_w += cache_grad_sampling_loc[sid]; + _grad_h += cache_grad_sampling_loc[sid + 1]; + _grad_a += cache_grad_attn_weight[tid]; + sid += 2; + } + + + *grad_sampling_loc = _grad_w; + *(grad_sampling_loc + 1) = _grad_h; + *grad_attn_weight = _grad_a; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2]; + __shared__ scalar_t cache_grad_attn_weight[blockSize]; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight+threadIdx.x)=0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x); + } + + __syncthreads(); + + for (unsigned int s=blockSize/2; s>0; s>>=1) + { + if (tid < s) { + const unsigned int xid1 = tid << 1; + const unsigned int xid2 = (tid + s) << 1; + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2]; + cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1]; + } + __syncthreads(); + } + + if (tid == 0) + { + *grad_sampling_loc = cache_grad_sampling_loc[0]; + *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1]; + *grad_attn_weight = cache_grad_attn_weight[0]; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v1(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + extern __shared__ int _s[]; + scalar_t* cache_grad_sampling_loc = (scalar_t*)_s; + scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight+threadIdx.x)=0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x); + } + + __syncthreads(); + if (tid == 0) + { + scalar_t _grad_w=cache_grad_sampling_loc[0], _grad_h=cache_grad_sampling_loc[1], _grad_a=cache_grad_attn_weight[0]; + int sid=2; + for (unsigned int tid = 1; tid < blockDim.x; ++tid) + { + _grad_w += cache_grad_sampling_loc[sid]; + _grad_h += cache_grad_sampling_loc[sid + 1]; + _grad_a += cache_grad_attn_weight[tid]; + sid += 2; + } + + + *grad_sampling_loc = _grad_w; + *(grad_sampling_loc + 1) = _grad_h; + *grad_attn_weight = _grad_a; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + extern __shared__ int _s[]; + scalar_t* cache_grad_sampling_loc = (scalar_t*)_s; + scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight+threadIdx.x)=0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x); + } + + __syncthreads(); + + for (unsigned int s=blockDim.x/2, spre=blockDim.x; s>0; s>>=1, spre>>=1) + { + if (tid < s) { + const unsigned int xid1 = tid << 1; + const unsigned int xid2 = (tid + s) << 1; + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2]; + cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1]; + if (tid + (s << 1) < spre) + { + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + (s << 1)]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2 + (s << 1)]; + cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1 + (s << 1)]; + } + } + __syncthreads(); + } + + if (tid == 0) + { + *grad_sampling_loc = cache_grad_sampling_loc[0]; + *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1]; + *grad_attn_weight = cache_grad_attn_weight[0]; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + extern __shared__ int _s[]; + scalar_t* cache_grad_sampling_loc = (scalar_t*)_s; + scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight+threadIdx.x)=0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x); + } + + __syncthreads(); + + for (unsigned int s=blockDim.x/2, spre=blockDim.x; s>0; s>>=1, spre>>=1) + { + if (tid < s) { + const unsigned int xid1 = tid << 1; + const unsigned int xid2 = (tid + s) << 1; + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2]; + cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1]; + if (tid + (s << 1) < spre) + { + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + (s << 1)]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2 + (s << 1)]; + cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1 + (s << 1)]; + } + } + __syncthreads(); + } + + if (tid == 0) + { + atomicAdd(grad_sampling_loc, cache_grad_sampling_loc[0]); + atomicAdd(grad_sampling_loc + 1, cache_grad_sampling_loc[1]); + atomicAdd(grad_attn_weight, cache_grad_attn_weight[0]); + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + + +template +__global__ void ms_deformable_col2im_gpu_kernel_gm(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear_gm( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + grad_sampling_loc, grad_attn_weight); + } + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + + +template +void ms_deformable_im2col_cuda(cudaStream_t stream, + const scalar_t* data_value, + const int64_t* data_spatial_shapes, + const int64_t* data_level_start_index, + const scalar_t* data_sampling_loc, + const scalar_t* data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t* data_col) +{ + const int num_kernels = batch_size * num_query * num_heads * channels; + const int num_actual_kernels = batch_size * num_query * num_heads * channels; + const int num_threads = CUDA_NUM_THREADS; + ms_deformable_im2col_gpu_kernel + <<>>( + num_kernels, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, + batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, data_col); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in ms_deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); + } + +} + +template +void ms_deformable_col2im_cuda(cudaStream_t stream, + const scalar_t* grad_col, + const scalar_t* data_value, + const int64_t * data_spatial_shapes, + const int64_t * data_level_start_index, + const scalar_t * data_sampling_loc, + const scalar_t * data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t* grad_value, + scalar_t* grad_sampling_loc, + scalar_t* grad_attn_weight) +{ + const int num_threads = (channels > CUDA_NUM_THREADS)?CUDA_NUM_THREADS:channels; + const int num_kernels = batch_size * num_query * num_heads * channels; + const int num_actual_kernels = batch_size * num_query * num_heads * channels; + if (channels > 1024) + { + if ((channels & 1023) == 0) + { + ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + } + else + { + ms_deformable_col2im_gpu_kernel_gm + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + } + } + else{ + switch(channels) + { + case 1: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 2: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 4: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 8: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 16: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 32: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 64: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 128: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 256: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 512: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 1024: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + default: + if (channels < 64) + { + ms_deformable_col2im_gpu_kernel_shm_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + } + else + { + ms_deformable_col2im_gpu_kernel_shm_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + } + } + } + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in ms_deformable_col2im_cuda: %s\n", cudaGetErrorString(err)); + } + +} diff --git a/src/transformers/models/deformable_detr/custom_kernel/cuda/ms_deform_attn_cuda.h b/src/transformers/models/deformable_detr/custom_kernel/cuda/ms_deform_attn_cuda.h new file mode 100644 index 00000000000000..fbcf4543e66bb1 --- /dev/null +++ b/src/transformers/models/deformable_detr/custom_kernel/cuda/ms_deform_attn_cuda.h @@ -0,0 +1,29 @@ +/*! +************************************************************************************************** +* Deformable DETR +* Copyright (c) 2020 SenseTime. All Rights Reserved. +* Licensed under the Apache License, Version 2.0 [see LICENSE for details] +************************************************************************************************** +* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#pragma once +#include + +at::Tensor ms_deform_attn_cuda_forward( + const at::Tensor &value, + const at::Tensor &spatial_shapes, + const at::Tensor &level_start_index, + const at::Tensor &sampling_loc, + const at::Tensor &attn_weight, + const int im2col_step); + +std::vector ms_deform_attn_cuda_backward( + const at::Tensor &value, + const at::Tensor &spatial_shapes, + const at::Tensor &level_start_index, + const at::Tensor &sampling_loc, + const at::Tensor &attn_weight, + const at::Tensor &grad_output, + const int im2col_step); diff --git a/src/transformers/models/deformable_detr/custom_kernel/cuda/ms_deform_im2col_cuda.cuh b/src/transformers/models/deformable_detr/custom_kernel/cuda/ms_deform_im2col_cuda.cuh new file mode 100644 index 00000000000000..c0db0c88c9db2c --- /dev/null +++ b/src/transformers/models/deformable_detr/custom_kernel/cuda/ms_deform_im2col_cuda.cuh @@ -0,0 +1,1327 @@ +/*! +************************************************************************** +* Deformable DETR +* Copyright (c) 2020 SenseTime. All Rights Reserved. +* Licensed under the Apache License, Version 2.0 [see LICENSE for details] +************************************************************************** +* Modified from DCN (https://github.com/msracver/Deformable-ConvNets) +* Copyright (c) 2018 Microsoft +************************************************************************** +*/ + +#include +#include +#include + +#include +#include + +#include + +#define CUDA_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ + i < (n); \ + i += blockDim.x * gridDim.x) + +const int CUDA_NUM_THREADS = 1024; +inline int GET_BLOCKS(const int N, const int num_threads) +{ + return (N + num_threads - 1) / num_threads; +} + + +template +__device__ scalar_t ms_deform_attn_im2col_bilinear(const scalar_t* &bottom_data, + const int &height, const int &width, const int &nheads, const int &channels, + const scalar_t &h, const scalar_t &w, const int &m, const int &c) +{ + const int h_low = floor(h); + const int w_low = floor(w); + const int h_high = h_low + 1; + const int w_high = w_low + 1; + + const scalar_t lh = h - h_low; + const scalar_t lw = w - w_low; + const scalar_t hh = 1 - lh, hw = 1 - lw; + + const int w_stride = nheads * channels; + const int h_stride = width * w_stride; + const int h_low_ptr_offset = h_low * h_stride; + const int h_high_ptr_offset = h_low_ptr_offset + h_stride; + const int w_low_ptr_offset = w_low * w_stride; + const int w_high_ptr_offset = w_low_ptr_offset + w_stride; + const int base_ptr = m * channels + c; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + { + const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr; + v1 = bottom_data[ptr1]; + } + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + { + const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr; + v2 = bottom_data[ptr2]; + } + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + { + const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr; + v3 = bottom_data[ptr3]; + } + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + } + + const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + + +template +__device__ void ms_deform_attn_col2im_bilinear(const scalar_t* &bottom_data, + const int &height, const int &width, const int &nheads, const int &channels, + const scalar_t &h, const scalar_t &w, const int &m, const int &c, + const scalar_t &top_grad, + const scalar_t &attn_weight, + scalar_t* &grad_value, + scalar_t* grad_sampling_loc, + scalar_t* grad_attn_weight) +{ + const int h_low = floor(h); + const int w_low = floor(w); + const int h_high = h_low + 1; + const int w_high = w_low + 1; + + const scalar_t lh = h - h_low; + const scalar_t lw = w - w_low; + const scalar_t hh = 1 - lh, hw = 1 - lw; + + const int w_stride = nheads * channels; + const int h_stride = width * w_stride; + const int h_low_ptr_offset = h_low * h_stride; + const int h_high_ptr_offset = h_low_ptr_offset + h_stride; + const int w_low_ptr_offset = w_low * w_stride; + const int w_high_ptr_offset = w_low_ptr_offset + w_stride; + const int base_ptr = m * channels + c; + + const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + const scalar_t top_grad_value = top_grad * attn_weight; + scalar_t grad_h_weight = 0, grad_w_weight = 0; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + { + const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr; + v1 = bottom_data[ptr1]; + grad_h_weight -= hw * v1; + grad_w_weight -= hh * v1; + atomicAdd(grad_value+ptr1, w1*top_grad_value); + } + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + { + const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr; + v2 = bottom_data[ptr2]; + grad_h_weight -= lw * v2; + grad_w_weight += hh * v2; + atomicAdd(grad_value+ptr2, w2*top_grad_value); + } + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + { + const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr; + v3 = bottom_data[ptr3]; + grad_h_weight += hw * v3; + grad_w_weight -= lh * v3; + atomicAdd(grad_value+ptr3, w3*top_grad_value); + } + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + grad_h_weight += lw * v4; + grad_w_weight += lh * v4; + atomicAdd(grad_value+ptr4, w4*top_grad_value); + } + + const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + *grad_attn_weight = top_grad * val; + *grad_sampling_loc = width * grad_w_weight * top_grad_value; + *(grad_sampling_loc + 1) = height * grad_h_weight * top_grad_value; +} + + +template +__device__ void ms_deform_attn_col2im_bilinear_gm(const scalar_t* &bottom_data, + const int &height, const int &width, const int &nheads, const int &channels, + const scalar_t &h, const scalar_t &w, const int &m, const int &c, + const scalar_t &top_grad, + const scalar_t &attn_weight, + scalar_t* &grad_value, + scalar_t* grad_sampling_loc, + scalar_t* grad_attn_weight) +{ + const int h_low = floor(h); + const int w_low = floor(w); + const int h_high = h_low + 1; + const int w_high = w_low + 1; + + const scalar_t lh = h - h_low; + const scalar_t lw = w - w_low; + const scalar_t hh = 1 - lh, hw = 1 - lw; + + const int w_stride = nheads * channels; + const int h_stride = width * w_stride; + const int h_low_ptr_offset = h_low * h_stride; + const int h_high_ptr_offset = h_low_ptr_offset + h_stride; + const int w_low_ptr_offset = w_low * w_stride; + const int w_high_ptr_offset = w_low_ptr_offset + w_stride; + const int base_ptr = m * channels + c; + + const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + const scalar_t top_grad_value = top_grad * attn_weight; + scalar_t grad_h_weight = 0, grad_w_weight = 0; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + { + const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr; + v1 = bottom_data[ptr1]; + grad_h_weight -= hw * v1; + grad_w_weight -= hh * v1; + atomicAdd(grad_value+ptr1, w1*top_grad_value); + } + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + { + const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr; + v2 = bottom_data[ptr2]; + grad_h_weight -= lw * v2; + grad_w_weight += hh * v2; + atomicAdd(grad_value+ptr2, w2*top_grad_value); + } + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + { + const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr; + v3 = bottom_data[ptr3]; + grad_h_weight += hw * v3; + grad_w_weight -= lh * v3; + atomicAdd(grad_value+ptr3, w3*top_grad_value); + } + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + { + const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr; + v4 = bottom_data[ptr4]; + grad_h_weight += lw * v4; + grad_w_weight += lh * v4; + atomicAdd(grad_value+ptr4, w4*top_grad_value); + } + + const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + atomicAdd(grad_attn_weight, top_grad * val); + atomicAdd(grad_sampling_loc, width * grad_w_weight * top_grad_value); + atomicAdd(grad_sampling_loc + 1, height * grad_h_weight * top_grad_value); +} + + +template +__global__ void ms_deformable_im2col_gpu_kernel(const int n, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *data_col) +{ + CUDA_KERNEL_LOOP(index, n) + { + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + scalar_t *data_col_ptr = data_col + index; + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + scalar_t col = 0; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const scalar_t *data_value_ptr = data_value + (data_value_ptr_init_offset + level_start_id * qid_stride); + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + col += ms_deform_attn_im2col_bilinear(data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col) * weight; + } + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + } + } + *data_col_ptr = col; + } +} + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2]; + __shared__ scalar_t cache_grad_attn_weight[blockSize]; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight+threadIdx.x)=0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x); + } + + __syncthreads(); + if (tid == 0) + { + scalar_t _grad_w=cache_grad_sampling_loc[0], _grad_h=cache_grad_sampling_loc[1], _grad_a=cache_grad_attn_weight[0]; + int sid=2; + for (unsigned int tid = 1; tid < blockSize; ++tid) + { + _grad_w += cache_grad_sampling_loc[sid]; + _grad_h += cache_grad_sampling_loc[sid + 1]; + _grad_a += cache_grad_attn_weight[tid]; + sid += 2; + } + + + *grad_sampling_loc = _grad_w; + *(grad_sampling_loc + 1) = _grad_h; + *grad_attn_weight = _grad_a; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2]; + __shared__ scalar_t cache_grad_attn_weight[blockSize]; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight+threadIdx.x)=0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x); + } + + __syncthreads(); + + for (unsigned int s=blockSize/2; s>0; s>>=1) + { + if (tid < s) { + const unsigned int xid1 = tid << 1; + const unsigned int xid2 = (tid + s) << 1; + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2]; + cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1]; + } + __syncthreads(); + } + + if (tid == 0) + { + *grad_sampling_loc = cache_grad_sampling_loc[0]; + *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1]; + *grad_attn_weight = cache_grad_attn_weight[0]; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v1(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + extern __shared__ int _s[]; + scalar_t* cache_grad_sampling_loc = (scalar_t*)_s; + scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight+threadIdx.x)=0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x); + } + + __syncthreads(); + if (tid == 0) + { + scalar_t _grad_w=cache_grad_sampling_loc[0], _grad_h=cache_grad_sampling_loc[1], _grad_a=cache_grad_attn_weight[0]; + int sid=2; + for (unsigned int tid = 1; tid < blockDim.x; ++tid) + { + _grad_w += cache_grad_sampling_loc[sid]; + _grad_h += cache_grad_sampling_loc[sid + 1]; + _grad_a += cache_grad_attn_weight[tid]; + sid += 2; + } + + + *grad_sampling_loc = _grad_w; + *(grad_sampling_loc + 1) = _grad_h; + *grad_attn_weight = _grad_a; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + extern __shared__ int _s[]; + scalar_t* cache_grad_sampling_loc = (scalar_t*)_s; + scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight+threadIdx.x)=0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x); + } + + __syncthreads(); + + for (unsigned int s=blockDim.x/2, spre=blockDim.x; s>0; s>>=1, spre>>=1) + { + if (tid < s) { + const unsigned int xid1 = tid << 1; + const unsigned int xid2 = (tid + s) << 1; + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2]; + cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1]; + if (tid + (s << 1) < spre) + { + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + (s << 1)]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2 + (s << 1)]; + cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1 + (s << 1)]; + } + } + __syncthreads(); + } + + if (tid == 0) + { + *grad_sampling_loc = cache_grad_sampling_loc[0]; + *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1]; + *grad_attn_weight = cache_grad_attn_weight[0]; + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + +template +__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + extern __shared__ int _s[]; + scalar_t* cache_grad_sampling_loc = (scalar_t*)_s; + scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x; + unsigned int tid = threadIdx.x; + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0; + *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0; + *(cache_grad_attn_weight+threadIdx.x)=0; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x); + } + + __syncthreads(); + + for (unsigned int s=blockDim.x/2, spre=blockDim.x; s>0; s>>=1, spre>>=1) + { + if (tid < s) { + const unsigned int xid1 = tid << 1; + const unsigned int xid2 = (tid + s) << 1; + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2]; + cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1]; + if (tid + (s << 1) < spre) + { + cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + (s << 1)]; + cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2 + (s << 1)]; + cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1 + (s << 1)]; + } + } + __syncthreads(); + } + + if (tid == 0) + { + atomicAdd(grad_sampling_loc, cache_grad_sampling_loc[0]); + atomicAdd(grad_sampling_loc + 1, cache_grad_sampling_loc[1]); + atomicAdd(grad_attn_weight, cache_grad_attn_weight[0]); + } + __syncthreads(); + + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + + +template +__global__ void ms_deformable_col2im_gpu_kernel_gm(const int n, + const scalar_t *grad_col, + const scalar_t *data_value, + const int64_t *data_spatial_shapes, + const int64_t *data_level_start_index, + const scalar_t *data_sampling_loc, + const scalar_t *data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t *grad_value, + scalar_t *grad_sampling_loc, + scalar_t *grad_attn_weight) +{ + CUDA_KERNEL_LOOP(index, n) + { + int _temp = index; + const int c_col = _temp % channels; + _temp /= channels; + const int sampling_index = _temp; + const int m_col = _temp % num_heads; + _temp /= num_heads; + const int q_col = _temp % num_query; + _temp /= num_query; + const int b_col = _temp; + + const scalar_t top_grad = grad_col[index]; + + int data_weight_ptr = sampling_index * num_levels * num_point; + int data_loc_w_ptr = data_weight_ptr << 1; + const int grad_sampling_ptr = data_weight_ptr; + grad_sampling_loc += grad_sampling_ptr << 1; + grad_attn_weight += grad_sampling_ptr; + const int grad_weight_stride = 1; + const int grad_loc_stride = 2; + const int qid_stride = num_heads * channels; + const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride; + + for (int l_col=0; l_col < num_levels; ++l_col) + { + const int level_start_id = data_level_start_index[l_col]; + const int spatial_h_ptr = l_col << 1; + const int spatial_h = data_spatial_shapes[spatial_h_ptr]; + const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1]; + const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride; + const scalar_t *data_value_ptr = data_value + value_ptr_offset; + scalar_t *grad_value_ptr = grad_value + value_ptr_offset; + + for (int p_col=0; p_col < num_point; ++p_col) + { + const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr]; + const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1]; + const scalar_t weight = data_attn_weight[data_weight_ptr]; + + const scalar_t h_im = loc_h * spatial_h - 0.5; + const scalar_t w_im = loc_w * spatial_w - 0.5; + if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w) + { + ms_deform_attn_col2im_bilinear_gm( + data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col, + top_grad, weight, grad_value_ptr, + grad_sampling_loc, grad_attn_weight); + } + data_weight_ptr += 1; + data_loc_w_ptr += 2; + grad_attn_weight += grad_weight_stride; + grad_sampling_loc += grad_loc_stride; + } + } + } +} + + +template +void ms_deformable_im2col_cuda(cudaStream_t stream, + const scalar_t* data_value, + const int64_t* data_spatial_shapes, + const int64_t* data_level_start_index, + const scalar_t* data_sampling_loc, + const scalar_t* data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t* data_col) +{ + const int num_kernels = batch_size * num_query * num_heads * channels; + const int num_actual_kernels = batch_size * num_query * num_heads * channels; + const int num_threads = CUDA_NUM_THREADS; + ms_deformable_im2col_gpu_kernel + <<>>( + num_kernels, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, + batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, data_col); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in ms_deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); + } + +} + +template +void ms_deformable_col2im_cuda(cudaStream_t stream, + const scalar_t* grad_col, + const scalar_t* data_value, + const int64_t * data_spatial_shapes, + const int64_t * data_level_start_index, + const scalar_t * data_sampling_loc, + const scalar_t * data_attn_weight, + const int batch_size, + const int spatial_size, + const int num_heads, + const int channels, + const int num_levels, + const int num_query, + const int num_point, + scalar_t* grad_value, + scalar_t* grad_sampling_loc, + scalar_t* grad_attn_weight) +{ + const int num_threads = (channels > CUDA_NUM_THREADS)?CUDA_NUM_THREADS:channels; + const int num_kernels = batch_size * num_query * num_heads * channels; + const int num_actual_kernels = batch_size * num_query * num_heads * channels; + if (channels > 1024) + { + if ((channels & 1023) == 0) + { + ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + } + else + { + ms_deformable_col2im_gpu_kernel_gm + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + } + } + else{ + switch(channels) + { + case 1: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 2: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 4: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 8: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 16: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 32: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 64: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 128: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 256: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 512: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + case 1024: + ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + break; + default: + if (channels < 64) + { + ms_deformable_col2im_gpu_kernel_shm_reduce_v1 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + } + else + { + ms_deformable_col2im_gpu_kernel_shm_reduce_v2 + <<>>( + num_kernels, + grad_col, + data_value, + data_spatial_shapes, + data_level_start_index, + data_sampling_loc, + data_attn_weight, + batch_size, + spatial_size, + num_heads, + channels, + num_levels, + num_query, + num_point, + grad_value, + grad_sampling_loc, + grad_attn_weight); + } + } + } + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) + { + printf("error in ms_deformable_col2im_cuda: %s\n", cudaGetErrorString(err)); + } + +} diff --git a/src/transformers/models/deformable_detr/custom_kernel/ms_deform_attn.h b/src/transformers/models/deformable_detr/custom_kernel/ms_deform_attn.h new file mode 100644 index 00000000000000..119b1fa317d1e5 --- /dev/null +++ b/src/transformers/models/deformable_detr/custom_kernel/ms_deform_attn.h @@ -0,0 +1,61 @@ +/*! +************************************************************************************************** +* Deformable DETR +* Copyright (c) 2020 SenseTime. All Rights Reserved. +* Licensed under the Apache License, Version 2.0 [see LICENSE for details] +************************************************************************************************** +* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#pragma once + +#include "cpu/ms_deform_attn_cpu.h" + +#ifdef WITH_CUDA +#include "cuda/ms_deform_attn_cuda.h" +#endif + + +at::Tensor +ms_deform_attn_forward( + const at::Tensor &value, + const at::Tensor &spatial_shapes, + const at::Tensor &level_start_index, + const at::Tensor &sampling_loc, + const at::Tensor &attn_weight, + const int im2col_step) +{ + if (value.type().is_cuda()) + { +#ifdef WITH_CUDA + return ms_deform_attn_cuda_forward( + value, spatial_shapes, level_start_index, sampling_loc, attn_weight, im2col_step); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} + +std::vector +ms_deform_attn_backward( + const at::Tensor &value, + const at::Tensor &spatial_shapes, + const at::Tensor &level_start_index, + const at::Tensor &sampling_loc, + const at::Tensor &attn_weight, + const at::Tensor &grad_output, + const int im2col_step) +{ + if (value.type().is_cuda()) + { +#ifdef WITH_CUDA + return ms_deform_attn_cuda_backward( + value, spatial_shapes, level_start_index, sampling_loc, attn_weight, grad_output, im2col_step); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } + AT_ERROR("Not implemented on the CPU"); +} diff --git a/src/transformers/models/deformable_detr/custom_kernel/vision.cpp b/src/transformers/models/deformable_detr/custom_kernel/vision.cpp new file mode 100644 index 00000000000000..6ce3875568b9ba --- /dev/null +++ b/src/transformers/models/deformable_detr/custom_kernel/vision.cpp @@ -0,0 +1,16 @@ +/*! +************************************************************************************************** +* Deformable DETR +* Copyright (c) 2020 SenseTime. All Rights Reserved. +* Licensed under the Apache License, Version 2.0 [see LICENSE for details] +************************************************************************************************** +* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0 +************************************************************************************************** +*/ + +#include "ms_deform_attn.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("ms_deform_attn_forward", &ms_deform_attn_forward, "ms_deform_attn_forward"); + m.def("ms_deform_attn_backward", &ms_deform_attn_backward, "ms_deform_attn_backward"); +} \ No newline at end of file diff --git a/src/transformers/models/deformable_detr/load_custom.py b/src/transformers/models/deformable_detr/load_custom.py new file mode 100644 index 00000000000000..d2a8bc0cb2c074 --- /dev/null +++ b/src/transformers/models/deformable_detr/load_custom.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Loading of Deformable DETR's CUDA kernels""" + +import os + + +def load_cuda_kernels(): + from torch.utils.cpp_extension import load + + root = os.path.join(os.path.dirname(os.path.realpath(__file__)), "custom_kernel") + src_files = [ + os.path.join(root, filename) + for filename in [ + "vision.cpp", + os.path.join("cpu", "ms_deform_attn_cpu.cpp"), + os.path.join("cuda", "ms_deform_attn_cuda.cu"), + ] + ] + + load( + "MultiScaleDeformableAttention", + src_files, + # verbose=True, + with_cuda=True, + extra_include_paths=[root], + # build_directory=os.path.dirname(os.path.realpath(__file__)), + extra_cflags=["-DWITH_CUDA=1"], + extra_cuda_cflags=[ + "-DCUDA_HAS_FP16=1", + "-D__CUDA_NO_HALF_OPERATORS__", + "-D__CUDA_NO_HALF_CONVERSIONS__", + "-D__CUDA_NO_HALF2_OPERATORS__", + ], + ) + + import MultiScaleDeformableAttention as MSDA + + return MSDA diff --git a/src/transformers/models/deformable_detr/modeling_deformable_detr.py b/src/transformers/models/deformable_detr/modeling_deformable_detr.py new file mode 100755 index 00000000000000..acd4d4012470ff --- /dev/null +++ b/src/transformers/models/deformable_detr/modeling_deformable_detr.py @@ -0,0 +1,2465 @@ +# coding=utf-8 +# Copyright 2022 SenseTime and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch Deformable DETR model.""" + + +import copy +import math +import warnings +from dataclasses import dataclass +from typing import Dict, List, Optional, Tuple + +import torch +import torch.nn.functional as F +from torch import Tensor, nn +from torch.autograd import Function +from torch.autograd.function import once_differentiable + +from ...activations import ACT2FN +from ...file_utils import ( + ModelOutput, + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_scipy_available, + is_timm_available, + is_torch_cuda_available, + is_vision_available, + replace_return_docstrings, + requires_backends, +) +from ...modeling_outputs import BaseModelOutput +from ...modeling_utils import PreTrainedModel +from ...utils import logging +from .configuration_deformable_detr import DeformableDetrConfig +from .load_custom import load_cuda_kernels + + +logger = logging.get_logger(__name__) + +# Move this to not compile only when importing, this needs to happen later, like in __init__. +if is_torch_cuda_available(): + logger.info("Loading custom CUDA kernels...") + MultiScaleDeformableAttention = load_cuda_kernels() +else: + MultiScaleDeformableAttention = None + + +class MultiScaleDeformableAttentionFunction(Function): + @staticmethod + def forward( + context, + value, + value_spatial_shapes, + value_level_start_index, + sampling_locations, + attention_weights, + im2col_step, + ): + context.im2col_step = im2col_step + output = MultiScaleDeformableAttention.ms_deform_attn_forward( + value, + value_spatial_shapes, + value_level_start_index, + sampling_locations, + attention_weights, + context.im2col_step, + ) + context.save_for_backward( + value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights + ) + return output + + @staticmethod + @once_differentiable + def backward(context, grad_output): + ( + value, + value_spatial_shapes, + value_level_start_index, + sampling_locations, + attention_weights, + ) = context.saved_tensors + grad_value, grad_sampling_loc, grad_attn_weight = MultiScaleDeformableAttention.ms_deform_attn_backward( + value, + value_spatial_shapes, + value_level_start_index, + sampling_locations, + attention_weights, + grad_output, + context.im2col_step, + ) + + return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None + + +if is_scipy_available(): + from scipy.optimize import linear_sum_assignment + +if is_vision_available(): + from transformers.models.detr.feature_extraction_detr import center_to_corners_format + +if is_timm_available(): + from timm import create_model + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "DeformableDetrConfig" +_CHECKPOINT_FOR_DOC = "sensetime/deformable-detr" + +DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "sensetime/deformable-detr", + # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr +] + + +@dataclass +class DeformableDetrDecoderOutput(ModelOutput): + """ + Base class for outputs of the DeformableDetrDecoder. This class adds two attributes to + BaseModelOutputWithCrossAttentions, namely: + - a stacked tensor of intermediate decoder hidden states (i.e. the output of each decoder layer) + - a stacked tensor of intermediate reference points. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`): + Stacked intermediate hidden states (output of each layer of the decoder). + intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Stacked intermediate reference points (reference points of each layer of the decoder). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer + plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in + the self-attention heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, + used to compute the weighted average in the cross-attention heads. + """ + + last_hidden_state: torch.FloatTensor = None + intermediate_hidden_states: torch.FloatTensor = None + intermediate_reference_points: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class DeformableDetrModelOutput(ModelOutput): + """ + Base class for outputs of the Deformable DETR encoder-decoder model. + + Args: + init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): + Initial reference points sent through the Transformer decoder. + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the decoder of the model. + intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, hidden_size)`): + Stacked intermediate hidden states (output of each layer of the decoder). + intermediate_reference_points (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, 4)`): + Stacked intermediate reference points (reference points of each layer of the decoder). + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, num_queries, hidden_size)`. Hidden-states of the decoder at the output of each layer + plus the initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, num_queries, + num_queries)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted + average in the self-attention heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`. + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each + layer plus the initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`. + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): + Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are + picked as region proposals in the first stage. Output of bounding box binary classification (i.e. + foreground and background). + enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): + Logits of predicted bounding boxes coordinates in the first stage. + """ + + init_reference_points: torch.FloatTensor = None + last_hidden_state: torch.FloatTensor = None + intermediate_hidden_states: torch.FloatTensor = None + intermediate_reference_points: torch.FloatTensor = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + enc_outputs_class: Optional[torch.FloatTensor] = None + enc_outputs_coord_logits: Optional[torch.FloatTensor] = None + + +@dataclass +class DeformableDetrObjectDetectionOutput(ModelOutput): + """ + Output type of [`DeformableDetrForObjectDetection`]. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)): + Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a + bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized + scale-invariant IoU loss. + loss_dict (`Dict`, *optional*): + A dictionary containing the individual losses. Useful for logging. + logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`): + Classification logits (including no-object) for all queries. + pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): + Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These + values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding + possible padding). You can use [`~AutoFeatureExtractor.post_process`] to retrieve the unnormalized bounding + boxes. + auxiliary_outputs (`list[Dict]`, *optional*): + Optional, only returned when auxilary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) + and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and + `pred_boxes`) for each decoder layer. + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the decoder of the model. + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, num_queries, hidden_size)`. Hidden-states of the decoder at the output of each layer + plus the initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, num_queries, + num_queries)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted + average in the self-attention heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`. + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each + layer plus the initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_heads, 4, + 4)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average + in the self-attention heads. + intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, hidden_size)`): + Stacked intermediate hidden states (output of each layer of the decoder). + intermediate_reference_points (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, 4)`): + Stacked intermediate reference points (reference points of each layer of the decoder). + init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): + Initial reference points sent through the Transformer decoder. + enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): + Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are + picked as region proposals in the first stage. Output of bounding box binary classification (i.e. + foreground and background). + enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): + Logits of predicted bounding boxes coordinates in the first stage. + """ + + loss: Optional[torch.FloatTensor] = None + loss_dict: Optional[Dict] = None + logits: torch.FloatTensor = None + pred_boxes: torch.FloatTensor = None + auxiliary_outputs: Optional[List[Dict]] = None + init_reference_points: Optional[torch.FloatTensor] = None + last_hidden_state: Optional[torch.FloatTensor] = None + intermediate_hidden_states: Optional[torch.FloatTensor] = None + intermediate_reference_points: Optional[torch.FloatTensor] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + enc_outputs_class: Optional = None + enc_outputs_coord_logits: Optional = None + + +def _get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) + + +def inverse_sigmoid(x, eps=1e-5): + x = x.clamp(min=0, max=1) + x1 = x.clamp(min=eps) + x2 = (1 - x).clamp(min=eps) + return torch.log(x1 / x2) + + +# Copied from transformers.models.detr.modeling_detr.DetrFrozenBatchNorm2d with Detr->DeformableDetr +class DeformableDetrFrozenBatchNorm2d(nn.Module): + """ + BatchNorm2d where the batch statistics and the affine parameters are fixed. + + Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than + torchvision.models.resnet[18,34,50,101] produce nans. + """ + + def __init__(self, n): + super().__init__() + self.register_buffer("weight", torch.ones(n)) + self.register_buffer("bias", torch.zeros(n)) + self.register_buffer("running_mean", torch.zeros(n)) + self.register_buffer("running_var", torch.ones(n)) + + def _load_from_state_dict( + self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs + ): + num_batches_tracked_key = prefix + "num_batches_tracked" + if num_batches_tracked_key in state_dict: + del state_dict[num_batches_tracked_key] + + super()._load_from_state_dict( + state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs + ) + + def forward(self, x): + # move reshapes to the beginning + # to make it user-friendly + weight = self.weight.reshape(1, -1, 1, 1) + bias = self.bias.reshape(1, -1, 1, 1) + running_var = self.running_var.reshape(1, -1, 1, 1) + running_mean = self.running_mean.reshape(1, -1, 1, 1) + epsilon = 1e-5 + scale = weight * (running_var + epsilon).rsqrt() + bias = bias - running_mean * scale + return x * scale + bias + + +# Copied from transformers.models.detr.modeling_detr.replace_batch_norm with Detr->DeformableDetr +def replace_batch_norm(m, name=""): + for attr_str in dir(m): + target_attr = getattr(m, attr_str) + if isinstance(target_attr, nn.BatchNorm2d): + frozen = DeformableDetrFrozenBatchNorm2d(target_attr.num_features) + bn = getattr(m, attr_str) + frozen.weight.data.copy_(bn.weight) + frozen.bias.data.copy_(bn.bias) + frozen.running_mean.data.copy_(bn.running_mean) + frozen.running_var.data.copy_(bn.running_var) + setattr(m, attr_str, frozen) + for n, ch in m.named_children(): + replace_batch_norm(ch, n) + + +class DeformableDetrTimmConvEncoder(nn.Module): + """ + Convolutional encoder (backbone) from the timm library. + + nn.BatchNorm2d layers are replaced by DeformableDetrFrozenBatchNorm2d as defined above. + """ + + def __init__(self, config): + super().__init__() + + kwargs = {} + if config.dilation: + kwargs["output_stride"] = 16 + + requires_backends(self, ["timm"]) + + out_indices = (2, 3, 4) if config.num_feature_levels > 1 else (4,) + backbone = create_model( + config.backbone, pretrained=True, features_only=True, out_indices=out_indices, **kwargs + ) + # replace batch norm by frozen batch norm + with torch.no_grad(): + replace_batch_norm(backbone) + self.model = backbone + self.intermediate_channel_sizes = self.model.feature_info.channels() + self.strides = self.model.feature_info.reduction() + + if "resnet" in config.backbone: + for name, parameter in self.model.named_parameters(): + if "layer2" not in name and "layer3" not in name and "layer4" not in name: + parameter.requires_grad_(False) + + def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): + """ + Outputs feature maps of latter stages C_3 through C_5 in ResNet if `config.num_feature_levels > 1`, otherwise + outputs feature maps of C_5. + """ + # send pixel_values through the model to get list of feature maps + features = self.model(pixel_values) + + out = [] + for feature_map in features: + # downsample pixel_mask to match shape of corresponding feature_map + mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0] + out.append((feature_map, mask)) + return out + + +# Copied from transformers.models.detr.modeling_detr.DetrConvModel with Detr->DeformableDetr +class DeformableDetrConvModel(nn.Module): + """ + This module adds 2D position embeddings to all intermediate feature maps of the convolutional encoder. + """ + + def __init__(self, conv_encoder, position_embedding): + super().__init__() + self.conv_encoder = conv_encoder + self.position_embedding = position_embedding + + def forward(self, pixel_values, pixel_mask): + # send pixel_values and pixel_mask through backbone to get list of (feature_map, pixel_mask) tuples + out = self.conv_encoder(pixel_values, pixel_mask) + pos = [] + for feature_map, mask in out: + # position encoding + pos.append(self.position_embedding(feature_map, mask).to(feature_map.dtype)) + + return out, pos + + +# Copied from transformers.models.detr.modeling_detr._expand_mask +def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, target_len: Optional[int] = None): + """ + Expands attention_mask from `[batch_size, seq_len]` to `[batch_size, 1, target_seq_len, source_seq_len]`. + """ + batch_size, source_len = mask.size() + target_len = target_len if target_len is not None else source_len + + expanded_mask = mask[:, None, None, :].expand(batch_size, 1, target_len, source_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min) + + +class DeformableDetrSinePositionEmbedding(nn.Module): + """ + This is a more standard version of the position embedding, very similar to the one used by the Attention is all you + need paper, generalized to work on images. + """ + + def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None): + super().__init__() + self.embedding_dim = embedding_dim + self.temperature = temperature + self.normalize = normalize + if scale is not None and normalize is False: + raise ValueError("normalize should be True if scale is passed") + if scale is None: + scale = 2 * math.pi + self.scale = scale + + def forward(self, pixel_values, pixel_mask): + if pixel_mask is None: + raise ValueError("No pixel mask provided") + y_embed = pixel_mask.cumsum(1, dtype=torch.float32) + x_embed = pixel_mask.cumsum(2, dtype=torch.float32) + if self.normalize: + eps = 1e-6 + y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale + x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale + + dim_t = torch.arange(self.embedding_dim, dtype=torch.float32, device=pixel_values.device) + dim_t = self.temperature ** (2 * (dim_t // 2) / self.embedding_dim) + + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + return pos + + +# Copied from transformers.models.detr.modeling_detr.DetrLearnedPositionEmbedding +class DeformableDetrLearnedPositionEmbedding(nn.Module): + """ + This module learns positional embeddings up to a fixed maximum size. + """ + + def __init__(self, embedding_dim=256): + super().__init__() + self.row_embeddings = nn.Embedding(50, embedding_dim) + self.column_embeddings = nn.Embedding(50, embedding_dim) + + def forward(self, pixel_values, pixel_mask=None): + height, width = pixel_values.shape[-2:] + width_values = torch.arange(width, device=pixel_values.device) + height_values = torch.arange(height, device=pixel_values.device) + x_emb = self.column_embeddings(width_values) + y_emb = self.row_embeddings(height_values) + pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1) + pos = pos.permute(2, 0, 1) + pos = pos.unsqueeze(0) + pos = pos.repeat(pixel_values.shape[0], 1, 1, 1) + return pos + + +# Copied from transformers.models.detr.modeling_detr.build_position_encoding with Detr->DeformableDetr +def build_position_encoding(config): + n_steps = config.d_model // 2 + if config.position_embedding_type == "sine": + # TODO find a better way of exposing other arguments + position_embedding = DeformableDetrSinePositionEmbedding(n_steps, normalize=True) + elif config.position_embedding_type == "learned": + position_embedding = DeformableDetrLearnedPositionEmbedding(n_steps) + else: + raise ValueError(f"Not supported {config.position_embedding_type}") + + return position_embedding + + +def ms_deform_attn_core_pytorch(value, value_spatial_shapes, sampling_locations, attention_weights): + # for debug and test only, + # need to use cuda version instead + N_, S_, M_, D_ = value.shape + _, Lq_, M_, L_, P_, _ = sampling_locations.shape + value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1) + sampling_grids = 2 * sampling_locations - 1 + sampling_value_list = [] + for lid_, (H_, W_) in enumerate(value_spatial_shapes): + # N_, H_*W_, M_, D_ -> N_, H_*W_, M_*D_ -> N_, M_*D_, H_*W_ -> N_*M_, D_, H_, W_ + value_l_ = value_list[lid_].flatten(2).transpose(1, 2).reshape(N_ * M_, D_, H_, W_) + # N_, Lq_, M_, P_, 2 -> N_, M_, Lq_, P_, 2 -> N_*M_, Lq_, P_, 2 + sampling_grid_l_ = sampling_grids[:, :, :, lid_].transpose(1, 2).flatten(0, 1) + # N_*M_, D_, Lq_, P_ + sampling_value_l_ = F.grid_sample( + value_l_, sampling_grid_l_, mode="bilinear", padding_mode="zeros", align_corners=False + ) + sampling_value_list.append(sampling_value_l_) + # (N_, Lq_, M_, L_, P_) -> (N_, M_, Lq_, L_, P_) -> (N_, M_, 1, Lq_, L_*P_) + attention_weights = attention_weights.transpose(1, 2).reshape(N_ * M_, 1, Lq_, L_ * P_) + output = (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights).sum(-1).view(N_, M_ * D_, Lq_) + return output.transpose(1, 2).contiguous() + + +class DeformableDetrMultiscaleDeformableAttention(nn.Module): + """ + Multiscale deformable attention as proposed in Deformable DETR. + """ + + def __init__(self, embed_dim: int, num_heads: int, n_levels: int, n_points: int): + super().__init__() + if embed_dim % num_heads != 0: + raise ValueError( + f"embed_dim (d_model) must be divisible by num_heads, but got {embed_dim} and {num_heads}" + ) + dim_per_head = embed_dim // num_heads + # check if dim_per_head is power of 2 + if not ((dim_per_head & (dim_per_head - 1) == 0) and dim_per_head != 0): + warnings.warn( + "You'd better set embed_dim (d_model) in DeformableDetrMultiscaleDeformableAttention to make the" + " dimension of each attention head a power of 2 which is more efficient in the authors' CUDA" + " implementation." + ) + + self.im2col_step = 64 + + self.d_model = embed_dim + self.n_levels = n_levels + self.n_heads = num_heads + self.n_points = n_points + + self.sampling_offsets = nn.Linear(embed_dim, num_heads * n_levels * n_points * 2) + self.attention_weights = nn.Linear(embed_dim, num_heads * n_levels * n_points) + self.value_proj = nn.Linear(embed_dim, embed_dim) + self.output_proj = nn.Linear(embed_dim, embed_dim) + + self._reset_parameters() + + def _reset_parameters(self): + nn.init.constant_(self.sampling_offsets.weight.data, 0.0) + thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads) + grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) + grid_init = ( + (grid_init / grid_init.abs().max(-1, keepdim=True)[0]) + .view(self.n_heads, 1, 1, 2) + .repeat(1, self.n_levels, self.n_points, 1) + ) + for i in range(self.n_points): + grid_init[:, :, i, :] *= i + 1 + with torch.no_grad(): + self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1)) + nn.init.constant_(self.attention_weights.weight.data, 0.0) + nn.init.constant_(self.attention_weights.bias.data, 0.0) + nn.init.xavier_uniform_(self.value_proj.weight.data) + nn.init.constant_(self.value_proj.bias.data, 0.0) + nn.init.xavier_uniform_(self.output_proj.weight.data) + nn.init.constant_(self.output_proj.bias.data, 0.0) + + def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): + return tensor if position_embeddings is None else tensor + position_embeddings + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states=None, + encoder_attention_mask=None, + position_embeddings: Optional[torch.Tensor] = None, + reference_points=None, + spatial_shapes=None, + level_start_index=None, + output_attentions: bool = False, + ): + # add position embeddings to the hidden states before projecting to queries and keys + if position_embeddings is not None: + hidden_states = self.with_pos_embed(hidden_states, position_embeddings) + + batch_size, num_queries, _ = hidden_states.shape + batch_size, sequence_length, _ = encoder_hidden_states.shape + if (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() != sequence_length: + raise ValueError( + "Make sure to align the spatial shapes with the sequence length of the encoder hidden states" + ) + + value = self.value_proj(encoder_hidden_states) + if attention_mask is not None: + # we invert the attention_mask + value = value.masked_fill(~attention_mask[..., None], float(0)) + value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads) + sampling_offsets = self.sampling_offsets(hidden_states).view( + batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2 + ) + attention_weights = self.attention_weights(hidden_states).view( + batch_size, num_queries, self.n_heads, self.n_levels * self.n_points + ) + attention_weights = F.softmax(attention_weights, -1).view( + batch_size, num_queries, self.n_heads, self.n_levels, self.n_points + ) + # batch_size, num_queries, n_heads, n_levels, n_points, 2 + if reference_points.shape[-1] == 2: + offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1) + sampling_locations = ( + reference_points[:, :, None, :, None, :] + + sampling_offsets / offset_normalizer[None, None, None, :, None, :] + ) + elif reference_points.shape[-1] == 4: + sampling_locations = ( + reference_points[:, :, None, :, None, :2] + + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5 + ) + else: + raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}") + try: + # GPU + output = MultiScaleDeformableAttentionFunction.apply( + value, + spatial_shapes, + level_start_index, + sampling_locations, + attention_weights, + self.im2col_step, + ) + except Exception: + # CPU + output = ms_deform_attn_core_pytorch(value, spatial_shapes, sampling_locations, attention_weights) + output = self.output_proj(output) + + return output, attention_weights + + +class DeformableDetrMultiheadAttention(nn.Module): + """ + Multi-headed attention from 'Attention Is All You Need' paper. + + Here, we add position embeddings to the queries and keys (as explained in the Deformable DETR paper). + """ + + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + bias: bool = True, + ): + super().__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + if self.head_dim * num_heads != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" + f" {num_heads})." + ) + self.scaling = self.head_dim**-0.5 + + self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + + def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): + return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): + return tensor if position_embeddings is None else tensor + position_embeddings + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_embeddings: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + batch_size, target_len, embed_dim = hidden_states.size() + # add position embeddings to the hidden states before projecting to queries and keys + if position_embeddings is not None: + hidden_states_original = hidden_states + hidden_states = self.with_pos_embed(hidden_states, position_embeddings) + + # get queries, keys and values + query_states = self.q_proj(hidden_states) * self.scaling + key_states = self._shape(self.k_proj(hidden_states), -1, batch_size) + value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size) + + proj_shape = (batch_size * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape) + key_states = key_states.view(*proj_shape) + value_states = value_states.view(*proj_shape) + + source_len = key_states.size(1) + + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len): + raise ValueError( + f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is" + f" {attn_weights.size()}" + ) + + # expand attention_mask + if attention_mask is not None: + # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] + attention_mask = _expand_mask(attention_mask, hidden_states.dtype) + + if attention_mask is not None: + if attention_mask.size() != (batch_size, 1, target_len, source_len): + raise ValueError( + f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is" + f" {attention_mask.size()}" + ) + attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask + attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if output_attentions: + # this operation is a bit awkward, but it's required to + # make sure that attn_weights keeps its gradient. + # In order to do so, attn_weights have to reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + attn_output = attn_output.reshape(batch_size, target_len, embed_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped + + +class DeformableDetrEncoderLayer(nn.Module): + def __init__(self, config: DeformableDetrConfig): + super().__init__() + self.embed_dim = config.d_model + self.self_attn = DeformableDetrMultiscaleDeformableAttention( + embed_dim=self.embed_dim, + num_heads=config.encoder_attention_heads, + n_levels=config.num_feature_levels, + n_points=config.encoder_n_points, + ) + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) + self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: torch.Tensor, + position_embeddings: torch.Tensor = None, + reference_points=None, + spatial_shapes=None, + level_start_index=None, + output_attentions: bool = False, + ): + """ + Args: + hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Input to the layer. + attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): + Attention mask. + position_embeddings (`torch.FloatTensor`, *optional*): + Position embeddings, to be added to `hidden_states`. + reference_points (`torch.FloatTensor`, *optional*): + Reference points. + spatial_shapes (`torch.LongTensor`, *optional*): + Spatial shapes of the backbone feature maps. + level_start_index (`torch.LongTensor`, *optional*): + Level start index. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + """ + residual = hidden_states + + # Apply Multi-scale Deformable Attention Module on the multi-scale feature maps. + hidden_states, attn_weights = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + encoder_hidden_states=hidden_states, + encoder_attention_mask=attention_mask, + position_embeddings=position_embeddings, + reference_points=reference_points, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + output_attentions=output_attentions, + ) + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + if self.training: + if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any(): + clamp_value = torch.finfo(hidden_states.dtype).max - 1000 + hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +class DeformableDetrDecoderLayer(nn.Module): + def __init__(self, config: DeformableDetrConfig): + super().__init__() + self.embed_dim = config.d_model + + # self-attention + self.self_attn = DeformableDetrMultiheadAttention( + embed_dim=self.embed_dim, + num_heads=config.decoder_attention_heads, + dropout=config.attention_dropout, + ) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + # cross-attention + self.encoder_attn = DeformableDetrMultiscaleDeformableAttention( + embed_dim=self.embed_dim, + num_heads=config.decoder_attention_heads, + n_levels=config.num_feature_levels, + n_points=config.decoder_n_points, + ) + self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) + # feedforward neural networks + self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) + self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.Tensor, + position_embeddings: Optional[torch.Tensor] = None, + reference_points=None, + spatial_shapes=None, + level_start_index=None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = False, + ): + """ + Args: + hidden_states (`torch.FloatTensor`): + Input to the layer of shape `(seq_len, batch, embed_dim)`. + position_embeddings (`torch.FloatTensor`, *optional*): + Position embeddings that are added to the queries and keys in the self-attention layer. + reference_points (`torch.FloatTensor`, *optional*): + Reference points. + spatial_shapes (`torch.LongTensor`, *optional*): + Spatial shapes. + level_start_index (`torch.LongTensor`, *optional*): + Level start index. + encoder_hidden_states (`torch.FloatTensor`): + cross attention input to the layer of shape `(seq_len, batch, embed_dim)` + encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size + `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative + values. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + """ + residual = hidden_states + + # Self Attention + hidden_states, self_attn_weights = self.self_attn( + hidden_states=hidden_states, + position_embeddings=position_embeddings, + output_attentions=output_attentions, + ) + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + second_residual = hidden_states + + # Cross-Attention + cross_attn_weights = None + hidden_states, cross_attn_weights = self.encoder_attn( + hidden_states=hidden_states, + attention_mask=encoder_attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + position_embeddings=position_embeddings, + reference_points=reference_points, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + output_attentions=output_attentions, + ) + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = second_residual + hidden_states + + hidden_states = self.encoder_attn_layer_norm(hidden_states) + + # Fully Connected + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights, cross_attn_weights) + + return outputs + + +# Copied from transformers.models.detr.modeling_detr.DetrClassificationHead +class DeformableDetrClassificationHead(nn.Module): + """Head for sentence-level classification tasks.""" + + def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float): + super().__init__() + self.dense = nn.Linear(input_dim, inner_dim) + self.dropout = nn.Dropout(p=pooler_dropout) + self.out_proj = nn.Linear(inner_dim, num_classes) + + def forward(self, hidden_states: torch.Tensor): + hidden_states = self.dropout(hidden_states) + hidden_states = self.dense(hidden_states) + hidden_states = torch.tanh(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.out_proj(hidden_states) + return hidden_states + + +class DeformableDetrPreTrainedModel(PreTrainedModel): + config_class = DeformableDetrConfig + base_model_prefix = "model" + main_input_name = "pixel_values" + + def _init_weights(self, module): + std = self.config.init_std + + if isinstance(module, DeformableDetrLearnedPositionEmbedding): + nn.init.uniform_(module.row_embeddings.weight) + nn.init.uniform_(module.column_embeddings.weight) + elif isinstance(module, DeformableDetrMultiscaleDeformableAttention): + module._reset_parameters() + elif isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + if hasattr(module, "reference_points") and not self.config.two_stage: + nn.init.xavier_uniform_(module.reference_points.weight.data, gain=1.0) + nn.init.constant_(module.reference_points.bias.data, 0.0) + if hasattr(module, "level_embed"): + nn.init.normal_(module.level_embed) + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, DeformableDetrDecoder): + module.gradient_checkpointing = value + + +DEFORMABLE_DETR_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`DeformableDetrConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +DEFORMABLE_DETR_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Padding will be ignored by default should you provide it. + + Pixel values can be obtained using [`AutoFeatureExtractor`]. See [`AutoFeatureExtractor.__call__`] for + details. + + pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): + Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: + + - 1 for pixels that are real (i.e. **not masked**), + - 0 for pixels that are padding (i.e. **masked**). + + [What are attention masks?](../glossary#attention-mask) + + decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, num_queries)`, *optional*): + Not used by default. Can be used to mask object queries. + encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): + Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) + `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of + hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you + can choose to directly pass a flattened representation of an image. + decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): + Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an + embedded representation. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. +""" + + +class DeformableDetrEncoder(DeformableDetrPreTrainedModel): + """ + Transformer encoder consisting of *config.encoder_layers* deformable attention layers. Each layer is a + [`DeformableDetrEncoderLayer`]. + + The encoder updates the flattened multi-scale feature maps through multiple deformable attention layers. + + Args: + config: DeformableDetrConfig + """ + + def __init__(self, config: DeformableDetrConfig): + super().__init__(config) + + self.dropout = config.dropout + self.layers = nn.ModuleList([DeformableDetrEncoderLayer(config) for _ in range(config.encoder_layers)]) + + # Initialize weights and apply final processing + self.post_init() + + @staticmethod + def get_reference_points(spatial_shapes, valid_ratios, device): + """ + Get reference points for each feature map. Used in decoder. + + Args: + spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`): + Spatial shapes of each feature map. + valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`): + Valid ratios of each feature map. + device (`torch.device`): + Device on which to create the tensors. + Returns: + `torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)` + """ + reference_points_list = [] + for level, (height, width) in enumerate(spatial_shapes): + + ref_y, ref_x = torch.meshgrid( + torch.linspace(0.5, height - 0.5, height, dtype=torch.float32, device=device), + torch.linspace(0.5, width - 0.5, width, dtype=torch.float32, device=device), + ) + # TODO: valid_ratios could be useless here. check https://github.com/fundamentalvision/Deformable-DETR/issues/36 + ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, level, 1] * height) + ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, level, 0] * width) + ref = torch.stack((ref_x, ref_y), -1) + reference_points_list.append(ref) + reference_points = torch.cat(reference_points_list, 1) + reference_points = reference_points[:, :, None] * valid_ratios[:, None] + return reference_points + + def forward( + self, + inputs_embeds=None, + attention_mask=None, + position_embeddings=None, + spatial_shapes=None, + level_start_index=None, + valid_ratios=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Args: + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Flattened feature map (output of the backbone + projection layer) that is passed to the encoder. + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`: + - 1 for pixel features that are real (i.e. **not masked**), + - 0 for pixel features that are padding (i.e. **masked**). + [What are attention masks?](../glossary#attention-mask) + position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Position embeddings that are added to the queries and keys in each self-attention layer. + spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`): + Spatial shapes of each feature map. + level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`): + Starting index of each feature map. + valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`): + Ratio of valid area in each feature level. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + hidden_states = inputs_embeds + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=inputs_embeds.device) + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + for i, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + layer_outputs = encoder_layer( + hidden_states, + attention_mask, + position_embeddings=position_embeddings, + reference_points=reference_points, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions + ) + + +class DeformableDetrDecoder(DeformableDetrPreTrainedModel): + """ + Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DeformableDetrDecoderLayer`]. + + The decoder updates the query embeddings through multiple self-attention and cross-attention layers. + + Some tweaks for Deformable DETR: + + - `position_embeddings`, `reference_points`, `spatial_shapes` and `valid_ratios` are added to the forward pass. + - it also returns a stack of intermediate outputs and reference points from all decoding layers. + + Args: + config: DeformableDetrConfig + """ + + def __init__(self, config: DeformableDetrConfig): + super().__init__(config) + + self.dropout = config.dropout + self.layers = nn.ModuleList([DeformableDetrDecoderLayer(config) for _ in range(config.decoder_layers)]) + self.gradient_checkpointing = False + + # hack implementation for iterative bounding box refinement and two-stage Deformable DETR + self.bbox_embed = None + self.class_embed = None + + # Initialize weights and apply final processing + self.post_init() + + def forward( + self, + inputs_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + position_embeddings=None, + reference_points=None, + spatial_shapes=None, + level_start_index=None, + valid_ratios=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Args: + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`): + The query embeddings that are passed into the decoder. + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention + of the decoder. + encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected + in `[0, 1]`: + - 1 for pixels that are real (i.e. **not masked**), + - 0 for pixels that are padding (i.e. **masked**). + position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): + Position embeddings that are added to the queries and keys in each self-attention layer. + reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)` is `as_two_stage` else `(batch_size, num_queries, 2)` or , *optional*): + Reference point in range `[0, 1]`, top-left (0,0), bottom-right (1, 1), including padding area. + spatial_shapes (`torch.FloatTensor` of shape `(num_feature_levels, 2)`): + Spatial shapes of the feature maps. + level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`, *optional*): + Indexes for the start of each feature level. In range `[0, sequence_length]`. + valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`, *optional*): + Ratio of valid area in each feature level. + + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if inputs_embeds is not None: + hidden_states = inputs_embeds + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None + intermediate = () + intermediate_reference_points = () + + for idx, decoder_layer in enumerate(self.layers): + if reference_points.shape[-1] == 4: + reference_points_input = ( + reference_points[:, :, None] * torch.cat([valid_ratios, valid_ratios], -1)[:, None] + ) + else: + if reference_points.shape[-1] != 2: + raise ValueError("Reference points' last dimension must be of size 2") + reference_points_input = reference_points[:, :, None] * valid_ratios[:, None] + + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(decoder_layer), + hidden_states, + encoder_hidden_states, + encoder_attention_mask, + None, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + position_embeddings=position_embeddings, + encoder_hidden_states=encoder_hidden_states, + reference_points=reference_points_input, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + # hack implementation for iterative bounding box refinement + if self.bbox_embed is not None: + tmp = self.bbox_embed[idx](hidden_states) + if reference_points.shape[-1] == 4: + new_reference_points = tmp + inverse_sigmoid(reference_points) + new_reference_points = new_reference_points.sigmoid() + else: + if reference_points.shape[-1] != 2: + raise ValueError( + f"Reference points' last dimension must be of size 2, but is {reference_points.shape[-1]}" + ) + new_reference_points = tmp + new_reference_points[..., :2] = tmp[..., :2] + inverse_sigmoid(reference_points) + new_reference_points = new_reference_points.sigmoid() + reference_points = new_reference_points.detach() + + intermediate += (hidden_states,) + intermediate_reference_points += (reference_points,) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + if encoder_hidden_states is not None: + all_cross_attentions += (layer_outputs[2],) + + intermediate = torch.stack(intermediate) + intermediate_reference_points = torch.stack(intermediate_reference_points) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + intermediate, + intermediate_reference_points, + all_hidden_states, + all_self_attns, + all_cross_attentions, + ] + if v is not None + ) + return DeformableDetrDecoderOutput( + last_hidden_state=hidden_states, + intermediate_hidden_states=intermediate, + intermediate_reference_points=intermediate_reference_points, + hidden_states=all_hidden_states, + attentions=all_self_attns, + cross_attentions=all_cross_attentions, + ) + + +@add_start_docstrings( + """ + The bare Deformable DETR Model (consisting of a backbone and encoder-decoder Transformer) outputting raw + hidden-states without any specific head on top. + """, + DEFORMABLE_DETR_START_DOCSTRING, +) +class DeformableDetrModel(DeformableDetrPreTrainedModel): + def __init__(self, config: DeformableDetrConfig): + super().__init__(config) + + # Create backbone + positional encoding + backbone = DeformableDetrTimmConvEncoder(config) + position_embeddings = build_position_encoding(config) + self.backbone = DeformableDetrConvModel(backbone, position_embeddings) + + # Create input projection layers + if config.num_feature_levels > 1: + num_backbone_outs = len(backbone.strides) + input_proj_list = [] + for _ in range(num_backbone_outs): + in_channels = backbone.intermediate_channel_sizes[_] + input_proj_list.append( + nn.Sequential( + nn.Conv2d(in_channels, config.d_model, kernel_size=1), + nn.GroupNorm(32, config.d_model), + ) + ) + for _ in range(config.num_feature_levels - num_backbone_outs): + input_proj_list.append( + nn.Sequential( + nn.Conv2d(in_channels, config.d_model, kernel_size=3, stride=2, padding=1), + nn.GroupNorm(32, config.d_model), + ) + ) + in_channels = config.d_model + self.input_proj = nn.ModuleList(input_proj_list) + else: + self.input_proj = nn.ModuleList( + [ + nn.Sequential( + nn.Conv2d(backbone.intermediate_channel_sizes[-1], config.d_model, kernel_size=1), + nn.GroupNorm(32, config.d_model), + ) + ] + ) + + if not config.two_stage: + self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model * 2) + + self.encoder = DeformableDetrEncoder(config) + self.decoder = DeformableDetrDecoder(config) + + self.level_embed = nn.Parameter(torch.Tensor(config.num_feature_levels, config.d_model)) + + if config.two_stage: + self.enc_output = nn.Linear(config.d_model, config.d_model) + self.enc_output_norm = nn.LayerNorm(config.d_model) + self.pos_trans = nn.Linear(config.d_model * 2, config.d_model * 2) + self.pos_trans_norm = nn.LayerNorm(config.d_model * 2) + else: + self.reference_points = nn.Linear(config.d_model, 2) + + self.post_init() + + def get_encoder(self): + return self.encoder + + def get_decoder(self): + return self.decoder + + def freeze_backbone(self): + for name, param in self.backbone.conv_encoder.model.named_parameters(): + param.requires_grad_(False) + + def unfreeze_backbone(self): + for name, param in self.backbone.conv_encoder.model.named_parameters(): + param.requires_grad_(True) + + def get_valid_ratio(self, mask): + """Get the valid ratio of all feature maps.""" + + _, height, width = mask.shape + valid_height = torch.sum(~mask[:, :, 0], 1) + valid_width = torch.sum(~mask[:, 0, :], 1) + valid_ratio_heigth = valid_height.float() / height + valid_ratio_width = valid_width.float() / width + valid_ratio = torch.stack([valid_ratio_width, valid_ratio_heigth], -1) + return valid_ratio + + def get_proposal_pos_embed(self, proposals): + """Get the position embedding of the proposals.""" + + num_pos_feats = 128 + temperature = 10000 + scale = 2 * math.pi + + dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=proposals.device) + dim_t = temperature ** (2 * (dim_t // 2) / num_pos_feats) + # batch_size, num_queries, 4 + proposals = proposals.sigmoid() * scale + # batch_size, num_queries, 4, 128 + pos = proposals[:, :, :, None] / dim_t + # batch_size, num_queries, 4, 64, 2 -> batch_size, num_queries, 512 + pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), dim=4).flatten(2) + return pos + + def gen_encoder_output_proposals(self, enc_output, padding_mask, spatial_shapes): + """Generate the encoder output proposals from encoded enc_output. + + Args: + enc_output (Tensor[batch_size, sequence_length, hidden_size]): Output of the encoder. + padding_mask (Tensor[batch_size, sequence_length]): Padding mask for `enc_output`. + spatial_shapes (Tensor[num_feature_levels, 2]): Spatial shapes of the feature maps. + + Returns: + `tuple(torch.FloatTensor)`: A tuple of feature map and bbox prediction. + - object_query (Tensor[batch_size, sequence_length, hidden_size]): Object query features. Later used to + directly predict a bounding box. (without the need of a decoder) + - output_proposals (Tensor[batch_size, sequence_length, 4]): Normalized proposals, after an inverse + sigmoid. + """ + batch_size = enc_output.shape[0] + proposals = [] + _cur = 0 + for level, (height, width) in enumerate(spatial_shapes): + mask_flatten_ = padding_mask[:, _cur : (_cur + height * width)].view(batch_size, height, width, 1) + valid_height = torch.sum(~mask_flatten_[:, :, 0, 0], 1) + valid_width = torch.sum(~mask_flatten_[:, 0, :, 0], 1) + + grid_y, grid_x = torch.meshgrid( + torch.linspace(0, height - 1, height, dtype=torch.float32, device=enc_output.device), + torch.linspace(0, width - 1, width, dtype=torch.float32, device=enc_output.device), + ) + grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) + + scale = torch.cat([valid_width.unsqueeze(-1), valid_height.unsqueeze(-1)], 1).view(batch_size, 1, 1, 2) + grid = (grid.unsqueeze(0).expand(batch_size, -1, -1, -1) + 0.5) / scale + width_heigth = torch.ones_like(grid) * 0.05 * (2.0**level) + proposal = torch.cat((grid, width_heigth), -1).view(batch_size, -1, 4) + proposals.append(proposal) + _cur += height * width + output_proposals = torch.cat(proposals, 1) + output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True) + output_proposals = torch.log(output_proposals / (1 - output_proposals)) # inverse sigmoid + output_proposals = output_proposals.masked_fill(padding_mask.unsqueeze(-1), float("inf")) + output_proposals = output_proposals.masked_fill(~output_proposals_valid, float("inf")) + + # assign each pixel as an object query + object_query = enc_output + object_query = object_query.masked_fill(padding_mask.unsqueeze(-1), float(0)) + object_query = object_query.masked_fill(~output_proposals_valid, float(0)) + object_query = self.enc_output_norm(self.enc_output(object_query)) + return object_query, output_proposals + + @add_start_docstrings_to_model_forward(DEFORMABLE_DETR_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=DeformableDetrModelOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values, + pixel_mask=None, + decoder_attention_mask=None, + encoder_outputs=None, + inputs_embeds=None, + decoder_inputs_embeds=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Returns: + + Examples: + + ```python + >>> from transformers import AutoFeatureExtractor, DeformableDetrModel + >>> from PIL import Image + >>> import requests + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> feature_extractor = AutoFeatureExtractor.from_pretrained("SenseTime/deformable-detr") + >>> model = DeformableDetrModel.from_pretrained("SenseTime/deformable-detr") + + >>> inputs = feature_extractor(images=image, return_tensors="pt") + + >>> outputs = model(**inputs) + + >>> last_hidden_states = outputs.last_hidden_state + >>> list(last_hidden_states.shape) + [1, 300, 256] + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + batch_size, num_channels, height, width = pixel_values.shape + device = pixel_values.device + + if pixel_mask is None: + pixel_mask = torch.ones(((batch_size, height, width)), dtype=torch.long, device=device) + + # Extract multi-scale feature maps of same resolution `config.d_model` (cf Figure 4 in paper) + # First, sent pixel_values + pixel_mask through Backbone to obtain the features + # which is a list of tuples + features, position_embeddings_list = self.backbone(pixel_values, pixel_mask) + + # Then, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default) + sources = [] + masks = [] + for level, (source, mask) in enumerate(features): + sources.append(self.input_proj[level](source)) + masks.append(mask) + if mask is None: + raise ValueError("No attention mask was provided") + + # Lowest resolution feature maps are obtained via 3x3 stride 2 convolutions on the final stage + if self.config.num_feature_levels > len(sources): + _len_sources = len(sources) + for level in range(_len_sources, self.config.num_feature_levels): + if level == _len_sources: + source = self.input_proj[level](features[-1][0]) + else: + source = self.input_proj[level](sources[-1]) + mask = nn.functional.interpolate(pixel_mask[None].float(), size=source.shape[-2:]).to(torch.bool)[0] + pos_l = self.backbone.position_embedding(source, mask).to(source.dtype) + sources.append(source) + masks.append(mask) + position_embeddings_list.append(pos_l) + + # Create queries + query_embeds = None + if not self.config.two_stage: + query_embeds = self.query_position_embeddings.weight + + # Prepare encoder inputs (by flattening) + source_flatten = [] + mask_flatten = [] + lvl_pos_embed_flatten = [] + spatial_shapes = [] + for level, (source, mask, pos_embed) in enumerate(zip(sources, masks, position_embeddings_list)): + batch_size, num_channels, height, width = source.shape + spatial_shape = (height, width) + spatial_shapes.append(spatial_shape) + source = source.flatten(2).transpose(1, 2) + mask = mask.flatten(1) + pos_embed = pos_embed.flatten(2).transpose(1, 2) + lvl_pos_embed = pos_embed + self.level_embed[level].view(1, 1, -1) + lvl_pos_embed_flatten.append(lvl_pos_embed) + source_flatten.append(source) + mask_flatten.append(mask) + source_flatten = torch.cat(source_flatten, 1) + mask_flatten = torch.cat(mask_flatten, 1) + lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) + spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=source_flatten.device) + level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) + valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1) + + # revert valid_ratios + valid_ratios = ~valid_ratios.bool() + valid_ratios = valid_ratios.float() + + # Fourth, sent source_flatten + mask_flatten + lvl_pos_embed_flatten (backbone + proj layer output) through encoder + # Also provide spatial_shapes, level_start_index and valid_ratios + if encoder_outputs is None: + encoder_outputs = self.encoder( + inputs_embeds=source_flatten, + attention_mask=mask_flatten, + position_embeddings=lvl_pos_embed_flatten, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True + elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): + encoder_outputs = BaseModelOutput( + last_hidden_state=encoder_outputs[0], + hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, + attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, + ) + + # Fifth, prepare decoder inputs + batch_size, _, num_channels = encoder_outputs[0].shape + enc_outputs_class = None + enc_outputs_coord_logits = None + if self.config.two_stage: + object_query_embedding, output_proposals = self.gen_encoder_output_proposals( + encoder_outputs[0], ~mask_flatten, spatial_shapes + ) + + # hack implementation for two-stage Deformable DETR + # apply a detection head to each pixel (A.4 in paper) + # linear projection for bounding box binary classification (i.e. foreground and background) + enc_outputs_class = self.decoder.class_embed[-1](object_query_embedding) + # 3-layer FFN to predict bounding boxes coordinates (bbox regression branch) + delta_bbox = self.decoder.bbox_embed[-1](object_query_embedding) + enc_outputs_coord_logits = delta_bbox + output_proposals + + # only keep top scoring `config.two_stage_num_proposals` proposals + topk = self.config.two_stage_num_proposals + topk_proposals = torch.topk(enc_outputs_class[..., 0], topk, dim=1)[1] + topk_coords_logits = torch.gather( + enc_outputs_coord_logits, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4) + ) + + topk_coords_logits = topk_coords_logits.detach() + reference_points = topk_coords_logits.sigmoid() + init_reference_points = reference_points + pos_trans_out = self.pos_trans_norm(self.pos_trans(self.get_proposal_pos_embed(topk_coords_logits))) + query_embed, target = torch.split(pos_trans_out, num_channels, dim=2) + else: + query_embed, target = torch.split(query_embeds, num_channels, dim=1) + query_embed = query_embed.unsqueeze(0).expand(batch_size, -1, -1) + target = target.unsqueeze(0).expand(batch_size, -1, -1) + reference_points = self.reference_points(query_embed).sigmoid() + init_reference_points = reference_points + + decoder_outputs = self.decoder( + inputs_embeds=target, + position_embeddings=query_embed, + encoder_hidden_states=encoder_outputs[0], + encoder_attention_mask=mask_flatten, + reference_points=reference_points, + spatial_shapes=spatial_shapes, + level_start_index=level_start_index, + valid_ratios=valid_ratios, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + if not return_dict: + enc_outputs = tuple(value for value in [enc_outputs_class, enc_outputs_coord_logits] if value is not None) + tuple_outputs = (init_reference_points,) + decoder_outputs + encoder_outputs + enc_outputs + + return tuple_outputs + + return DeformableDetrModelOutput( + init_reference_points=init_reference_points, + last_hidden_state=decoder_outputs.last_hidden_state, + intermediate_hidden_states=decoder_outputs.intermediate_hidden_states, + intermediate_reference_points=decoder_outputs.intermediate_reference_points, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + enc_outputs_class=enc_outputs_class, + enc_outputs_coord_logits=enc_outputs_coord_logits, + ) + + +@add_start_docstrings( + """ + Deformable DETR Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on + top, for tasks such as COCO detection. + """, + DEFORMABLE_DETR_START_DOCSTRING, +) +class DeformableDetrForObjectDetection(DeformableDetrPreTrainedModel): + def __init__(self, config: DeformableDetrConfig): + super().__init__(config) + + # Deformable DETR encoder-decoder model + self.model = DeformableDetrModel(config) + + # Detection heads on top + self.class_embed = nn.Linear(config.d_model, config.num_labels) + self.bbox_embed = DeformableDetrMLPPredictionHead( + input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3 + ) + + prior_prob = 0.01 + bias_value = -math.log((1 - prior_prob) / prior_prob) + self.class_embed.bias.data = torch.ones(config.num_labels) * bias_value + nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0) + nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0) + + # if two-stage, the last class_embed and bbox_embed is for region proposal generation + num_pred = (config.decoder_layers + 1) if config.two_stage else config.decoder_layers + if config.with_box_refine: + self.class_embed = _get_clones(self.class_embed, num_pred) + self.bbox_embed = _get_clones(self.bbox_embed, num_pred) + nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0) + # hack implementation for iterative bounding box refinement + self.model.decoder.bbox_embed = self.bbox_embed + else: + nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0) + self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)]) + self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)]) + self.model.decoder.bbox_embed = None + if config.two_stage: + # hack implementation for two-stage + self.model.decoder.class_embed = self.class_embed + for box_embed in self.bbox_embed: + nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0) + + # Initialize weights and apply final processing + self.post_init() + + # taken from https://github.com/facebookresearch/detr/blob/master/models/detr.py + @torch.jit.unused + def _set_aux_loss(self, outputs_class, outputs_coord): + # this is a workaround to make torchscript happy, as torchscript + # doesn't support dictionary with non-homogeneous values, such + # as a dict having both a Tensor and a list. + return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] + + @add_start_docstrings_to_model_forward(DEFORMABLE_DETR_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=DeformableDetrObjectDetectionOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values, + pixel_mask=None, + decoder_attention_mask=None, + encoder_outputs=None, + inputs_embeds=None, + decoder_inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + labels (`List[Dict]` of len `(batch_size,)`, *optional*): + Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the + following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch + respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes + in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`. + + Returns: + + Examples: + + ```python + >>> from transformers import AutoFeatureExtractor, DeformableDetrForObjectDetection + >>> from PIL import Image + >>> import requests + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> feature_extractor = AutoFeatureExtractor.from_pretrained("SenseTime/deformable-detr") + >>> model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr") + + >>> inputs = feature_extractor(images=image, return_tensors="pt") + >>> outputs = model(**inputs) + + >>> # convert outputs (bounding boxes and class logits) to COCO API + >>> target_sizes = torch.tensor([image.size[::-1]]) + >>> results = feature_extractor.post_process(outputs, target_sizes=target_sizes)[0] + >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): + ... box = [round(i, 2) for i in box.tolist()] + ... # let's only keep detections with score > 0.7 + ... if score > 0.7: + ... print( + ... f"Detected {model.config.id2label[label.item()]} with confidence " + ... f"{round(score.item(), 3)} at location {box}" + ... ) + Detected cat with confidence 0.856 at location [342.19, 24.3, 640.02, 372.25] + Detected remote with confidence 0.739 at location [40.79, 72.78, 176.76, 117.25] + Detected cat with confidence 0.859 at location [16.5, 52.84, 318.25, 470.78] + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # First, sent images through DETR base model to obtain encoder + decoder outputs + outputs = self.model( + pixel_values, + pixel_mask=pixel_mask, + decoder_attention_mask=decoder_attention_mask, + encoder_outputs=encoder_outputs, + inputs_embeds=inputs_embeds, + decoder_inputs_embeds=decoder_inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs.intermediate_hidden_states if return_dict else outputs[2] + init_reference = outputs.init_reference_points if return_dict else outputs[0] + inter_references = outputs.intermediate_reference_points if return_dict else outputs[3] + + # class logits + predicted bounding boxes + outputs_classes = [] + outputs_coords = [] + + for level in range(hidden_states.shape[0]): + if level == 0: + reference = init_reference + else: + reference = inter_references[level - 1] + reference = inverse_sigmoid(reference) + outputs_class = self.class_embed[level](hidden_states[level]) + delta_bbox = self.bbox_embed[level](hidden_states[level]) + if reference.shape[-1] == 4: + outputs_coord_logits = delta_bbox + reference + elif reference.shape[-1] == 2: + delta_bbox[..., :2] += reference + outputs_coord_logits = delta_bbox + else: + raise ValueError(f"reference.shape[-1] should be 4 or 2, but got {reference.shape[-1]}") + outputs_coord = outputs_coord_logits.sigmoid() + outputs_classes.append(outputs_class) + outputs_coords.append(outputs_coord) + outputs_class = torch.stack(outputs_classes) + outputs_coord = torch.stack(outputs_coords) + + logits = outputs_class[-1] + pred_boxes = outputs_coord[-1] + + loss, loss_dict, auxiliary_outputs = None, None, None + if labels is not None: + # First: create the matcher + matcher = DeformableDetrHungarianMatcher( + class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost + ) + # Second: create the criterion + losses = ["labels", "boxes", "cardinality"] + criterion = DeformableDetrLoss( + matcher=matcher, + num_classes=self.config.num_labels, + eos_coef=self.config.eos_coefficient, + losses=losses, + ) + criterion.to(self.device) + # Third: compute the losses, based on outputs and labels + outputs_loss = {} + outputs_loss["logits"] = logits + outputs_loss["pred_boxes"] = pred_boxes + if self.config.auxiliary_loss: + intermediate = outputs.intermediate_hidden_states if return_dict else outputs[4] + outputs_class = self.class_embed(intermediate) + outputs_coord = self.bbox_embed(intermediate).sigmoid() + auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord) + outputs_loss["auxiliary_outputs"] = auxiliary_outputs + if self.config.two_stage: + enc_outputs_coord = outputs.enc_outputs_coord_logits.sigmoid() + outputs["enc_outputs"] = {"pred_logits": outputs.enc_outputs_class, "pred_boxes": enc_outputs_coord} + + loss_dict = criterion(outputs_loss, labels) + # Fourth: compute total loss, as a weighted sum of the various losses + weight_dict = {"loss_ce": 1, "loss_bbox": self.config.bbox_loss_coefficient} + weight_dict["loss_giou"] = self.config.giou_loss_coefficient + if self.config.auxiliary_loss: + aux_weight_dict = {} + for i in range(self.config.decoder_layers - 1): + aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) + weight_dict.update(aux_weight_dict) + loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) + + if not return_dict: + if auxiliary_outputs is not None: + output = (logits, pred_boxes) + auxiliary_outputs + outputs + else: + output = (logits, pred_boxes) + outputs + tuple_outputs = ((loss, loss_dict) + output) if loss is not None else output + + return tuple_outputs + + dict_outputs = DeformableDetrObjectDetectionOutput( + loss=loss, + loss_dict=loss_dict, + logits=logits, + pred_boxes=pred_boxes, + auxiliary_outputs=auxiliary_outputs, + last_hidden_state=outputs.last_hidden_state, + decoder_hidden_states=outputs.decoder_hidden_states, + decoder_attentions=outputs.decoder_attentions, + cross_attentions=outputs.cross_attentions, + encoder_last_hidden_state=outputs.encoder_last_hidden_state, + encoder_hidden_states=outputs.encoder_hidden_states, + encoder_attentions=outputs.encoder_attentions, + intermediate_hidden_states=outputs.intermediate_hidden_states, + init_reference_points=outputs.init_reference_points, + intermediate_reference_points=outputs.intermediate_reference_points, + enc_outputs_class=outputs.enc_outputs_class, + enc_outputs_coord_logits=outputs.enc_outputs_coord_logits, + ) + + return dict_outputs + + +# Copied from transformers.models.detr.modeling_detr.dice_loss +def dice_loss(inputs, targets, num_boxes): + """ + Compute the DICE loss, similar to generalized IOU for masks + + Args: + inputs: A float tensor of arbitrary shape. + The predictions for each example. + targets: A float tensor with the same shape as inputs. Stores the binary + classification label for each element in inputs (0 for the negative class and 1 for the positive + class). + """ + inputs = inputs.sigmoid() + inputs = inputs.flatten(1) + numerator = 2 * (inputs * targets).sum(1) + denominator = inputs.sum(-1) + targets.sum(-1) + loss = 1 - (numerator + 1) / (denominator + 1) + return loss.sum() / num_boxes + + +# Copied from transformers.models.detr.modeling_detr.sigmoid_focal_loss +def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2): + """ + Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. + + Args: + inputs (`torch.FloatTensor` of arbitrary shape): + The predictions for each example. + targets (`torch.FloatTensor` with the same shape as `inputs`) + A tensor storing the binary classification label for each element in the `inputs` (0 for the negative class + and 1 for the positive class). + alpha (`float`, *optional*, defaults to `0.25`): + Optional weighting factor in the range (0,1) to balance positive vs. negative examples. + gamma (`int`, *optional*, defaults to `2`): + Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. + + Returns: + Loss tensor + """ + prob = inputs.sigmoid() + ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction="none") + # add modulating factor + p_t = prob * targets + (1 - prob) * (1 - targets) + loss = ce_loss * ((1 - p_t) ** gamma) + + if alpha >= 0: + alpha_t = alpha * targets + (1 - alpha) * (1 - targets) + loss = alpha_t * loss + + return loss.mean(1).sum() / num_boxes + + +# taken from https://github.com/facebookresearch/detr/blob/master/models/detr.py +class DeformableDetrLoss(nn.Module): + """ + This class computes the losses for DeformableDetrForObjectDetection. The process happens in two steps: 1) we + compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of + matched ground-truth / prediction (supervise class and box) + """ + + def __init__(self, matcher, num_classes, eos_coef, losses, focal_alpha=0.25): + """ + Create the criterion. + + A note on the num_classes parameter (copied from original repo in detr.py): "the naming of the `num_classes` + parameter of the criterion is somewhat misleading. it indeed corresponds to `max_obj_id + 1`, where max_obj_id + is the maximum id for a class in your dataset. For example, COCO has a max_obj_id of 90, so we pass + `num_classes` to be 91. As another example, for a dataset that has a single class with id 1, you should pass + `num_classes` to be 2 (max_obj_id + 1). For more details on this, check the following discussion + https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223" + + Parameters: + matcher: module able to compute a matching between targets and proposals. + num_classes: number of object categories, omitting the special no-object category. + eos_coef: relative classification weight applied to the no-object category. + losses: list of all the losses to be applied. See get_loss for list of available losses. + focal_alpha: alpha in Focal Loss. + """ + super().__init__() + + self.matcher = matcher + self.num_classes = num_classes + self.losses = losses + self.focal_alpha = focal_alpha + + def loss_labels(self, outputs, targets, indices, num_boxes, log=True): + """Classification loss (NLL) + targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes] + """ + if "logits" not in outputs: + raise ValueError("No logits were found in the outputs") + + source_logits = outputs["logits"] + + idx = self._get_source_permutation_idx(indices) + target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)]) + target_classes = torch.full( + source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device + ) + target_classes[idx] = target_classes_o + + target_classes_onehot = torch.zeros( + [source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1], + dtype=source_logits.dtype, + layout=source_logits.layout, + device=source_logits.device, + ) + target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) + + target_classes_onehot = target_classes_onehot[:, :, :-1] + loss_ce = ( + sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) + * source_logits.shape[1] + ) + losses = {"loss_ce": loss_ce} + + return losses + + @torch.no_grad() + def loss_cardinality(self, outputs, targets, indices, num_boxes): + """ + Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes. + + This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients. + """ + logits = outputs["logits"] + device = logits.device + target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device) + # Count the number of predictions that are NOT "no-object" (which is the last class) + card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1) + card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float()) + losses = {"cardinality_error": card_err} + return losses + + def loss_boxes(self, outputs, targets, indices, num_boxes): + """ + Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss. + + Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes + are expected in format (center_x, center_y, w, h), normalized by the image size. + """ + if "pred_boxes" not in outputs: + raise ValueError("No predicted boxes found in outputs") + + idx = self._get_source_permutation_idx(indices) + source_boxes = outputs["pred_boxes"][idx] + target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0) + + loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction="none") + + losses = {} + losses["loss_bbox"] = loss_bbox.sum() / num_boxes + + loss_giou = 1 - torch.diag( + generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes)) + ) + losses["loss_giou"] = loss_giou.sum() / num_boxes + return losses + + def _get_source_permutation_idx(self, indices): + # permute predictions following indices + batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)]) + source_idx = torch.cat([source for (source, _) in indices]) + return batch_idx, source_idx + + def _get_target_permutation_idx(self, indices): + # permute targets following indices + batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)]) + target_idx = torch.cat([target for (_, target) in indices]) + return batch_idx, target_idx + + def get_loss(self, loss, outputs, targets, indices, num_boxes): + loss_map = { + "labels": self.loss_labels, + "cardinality": self.loss_cardinality, + "boxes": self.loss_boxes, + } + if loss not in loss_map: + raise ValueError(f"Loss {loss} not supported") + + return loss_map[loss](outputs, targets, indices, num_boxes) + + def forward(self, outputs, targets): + """ + This performs the loss computation. + + Parameters: + outputs: dict of tensors, see the output specification of the model for the format + targets: list of dicts, such that len(targets) == batch_size. + The expected keys in each dict depends on the losses applied, see each loss' doc + """ + outputs_without_aux = {k: v for k, v in outputs.items() if k != "auxiliary_outputs"} + + # Retrieve the matching between the outputs of the last layer and the targets + indices = self.matcher(outputs_without_aux, targets) + + # Compute the average number of target boxes accross all nodes, for normalization purposes + num_boxes = sum(len(t["class_labels"]) for t in targets) + num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) + # (Niels): comment out function below, distributed training to be added + # if is_dist_avail_and_initialized(): + # torch.distributed.all_reduce(num_boxes) + # (Niels) in original implementation, num_boxes is divided by get_world_size() + num_boxes = torch.clamp(num_boxes, min=1).item() + + # Compute all the requested losses + losses = {} + for loss in self.losses: + losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) + + # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. + if "auxiliary_outputs" in outputs: + for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]): + indices = self.matcher(auxiliary_outputs, targets) + for loss in self.losses: + l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes) + l_dict = {k + f"_{i}": v for k, v in l_dict.items()} + losses.update(l_dict) + + if "enc_outputs" in outputs: + enc_outputs = outputs["enc_outputs"] + bin_targets = copy.deepcopy(targets) + for bt in bin_targets: + bt["labels"] = torch.zeros_like(bt["labels"]) + indices = self.matcher(enc_outputs, bin_targets) + for loss in self.losses: + kwargs = {} + if loss == "labels": + # Logging is enabled only for the last layer + kwargs["log"] = False + l_dict = self.get_loss(loss, enc_outputs, bin_targets, indices, num_boxes, **kwargs) + l_dict = {k + "_enc": v for k, v in l_dict.items()} + losses.update(l_dict) + + return losses + + +# Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead +class DeformableDetrMLPPredictionHead(nn.Module): + """ + Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates, + height and width of a bounding box w.r.t. an image. + + Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py + + """ + + def __init__(self, input_dim, hidden_dim, output_dim, num_layers): + super().__init__() + self.num_layers = num_layers + h = [hidden_dim] * (num_layers - 1) + self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) + + def forward(self, x): + for i, layer in enumerate(self.layers): + x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x) + return x + + +# Copied from transformers.models.detr.modeling_detr.DetrHungarianMatcher +class DeformableDetrHungarianMatcher(nn.Module): + """ + This class computes an assignment between the targets and the predictions of the network. + + For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more + predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are + un-matched (and thus treated as non-objects). + + Args: + class_cost: + The relative weight of the classification error in the matching cost. + bbox_cost: + The relative weight of the L1 error of the bounding box coordinates in the matching cost. + giou_cost: + The relative weight of the giou loss of the bounding box in the matching cost. + """ + + def __init__(self, class_cost: float = 1, bbox_cost: float = 1, giou_cost: float = 1): + super().__init__() + requires_backends(self, ["scipy"]) + + self.class_cost = class_cost + self.bbox_cost = bbox_cost + self.giou_cost = giou_cost + if class_cost == 0 and bbox_cost == 0 and giou_cost == 0: + raise ValueError("All costs of the Matcher can't be 0") + + @torch.no_grad() + def forward(self, outputs, targets): + """ + Args: + outputs (`dict`): + A dictionary that contains at least these entries: + * "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits + * "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates. + targets (`List[dict]`): + A list of targets (len(targets) = batch_size), where each target is a dict containing: + * "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of + ground-truth + objects in the target) containing the class labels + * "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates. + + Returns: + `List[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where: + - index_i is the indices of the selected predictions (in order) + - index_j is the indices of the corresponding selected targets (in order) + For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes) + """ + batch_size, num_queries = outputs["logits"].shape[:2] + + # We flatten to compute the cost matrices in a batch + out_prob = outputs["logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes] + out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] + + # Also concat the target labels and boxes + target_ids = torch.cat([v["class_labels"] for v in targets]) + target_bbox = torch.cat([v["boxes"] for v in targets]) + + # Compute the classification cost. Contrary to the loss, we don't use the NLL, + # but approximate it in 1 - proba[target class]. + # The 1 is a constant that doesn't change the matching, it can be ommitted. + class_cost = -out_prob[:, target_ids] + + # Compute the L1 cost between boxes + bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) + + # Compute the giou cost between boxes + giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) + + # Final cost matrix + cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost + cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu() + + sizes = [len(v["boxes"]) for v in targets] + indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))] + return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices] + + +# Copied from transformers.models.detr.modeling_detr._upcast +def _upcast(t: Tensor) -> Tensor: + # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type + if t.is_floating_point(): + return t if t.dtype in (torch.float32, torch.float64) else t.float() + else: + return t if t.dtype in (torch.int32, torch.int64) else t.int() + + +# Copied from transformers.models.detr.modeling_detr.box_area +def box_area(boxes: Tensor) -> Tensor: + """ + Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates. + + Args: + boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`): + Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1 + < x2` and `0 <= y1 < y2`. + + Returns: + `torch.FloatTensor`: a tensor containing the area for each box. + """ + boxes = _upcast(boxes) + return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) + + +# Copied from transformers.models.detr.modeling_detr.box_iou +def box_iou(boxes1, boxes2): + area1 = box_area(boxes1) + area2 = box_area(boxes2) + + left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] + right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] + + width_height = (right_bottom - left_top).clamp(min=0) # [N,M,2] + inter = width_height[:, :, 0] * width_height[:, :, 1] # [N,M] + + union = area1[:, None] + area2 - inter + + iou = inter / union + return iou, union + + +# Copied from transformers.models.detr.modeling_detr.generalized_box_iou +def generalized_box_iou(boxes1, boxes2): + """ + Generalized IoU from https://giou.stanford.edu/. The boxes should be in [x0, y0, x1, y1] (corner) format. + + Returns: + `torch.FloatTensor`: a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2) + """ + # degenerate boxes gives inf / nan results + # so do an early check + if not (boxes1[:, 2:] >= boxes1[:, :2]).all(): + raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}") + if not (boxes2[:, 2:] >= boxes2[:, :2]).all(): + raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}") + iou, union = box_iou(boxes1, boxes2) + + top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2]) + bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) + + width_height = (bottom_right - top_left).clamp(min=0) # [N,M,2] + area = width_height[:, :, 0] * width_height[:, :, 1] + + return iou - (area - union) / area + + +# Copied from transformers.models.detr.modeling_detr._max_by_axis +def _max_by_axis(the_list): + # type: (List[List[int]]) -> List[int] + maxes = the_list[0] + for sublist in the_list[1:]: + for index, item in enumerate(sublist): + maxes[index] = max(maxes[index], item) + return maxes + + +# Copied from transformers.models.detr.modeling_detr.NestedTensor +class NestedTensor(object): + def __init__(self, tensors, mask: Optional[Tensor]): + self.tensors = tensors + self.mask = mask + + def to(self, device): + cast_tensor = self.tensors.to(device) + mask = self.mask + if mask is not None: + cast_mask = mask.to(device) + else: + cast_mask = None + return NestedTensor(cast_tensor, cast_mask) + + def decompose(self): + return self.tensors, self.mask + + def __repr__(self): + return str(self.tensors) + + +# Copied from transformers.models.detr.modeling_detr.nested_tensor_from_tensor_list +def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): + if tensor_list[0].ndim == 3: + max_size = _max_by_axis([list(img.shape) for img in tensor_list]) + batch_shape = [len(tensor_list)] + max_size + batch_size, num_channels, height, width = batch_shape + dtype = tensor_list[0].dtype + device = tensor_list[0].device + tensor = torch.zeros(batch_shape, dtype=dtype, device=device) + mask = torch.ones((batch_size, height, width), dtype=torch.bool, device=device) + for img, pad_img, m in zip(tensor_list, tensor, mask): + pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) + m[: img.shape[1], : img.shape[2]] = False + else: + raise ValueError("Only 3-dimensional tensors are supported") + return NestedTensor(tensor, mask) diff --git a/src/transformers/models/detr/modeling_detr.py b/src/transformers/models/detr/modeling_detr.py index 9d974f1a55123b..98f5f72f7e4353 100644 --- a/src/transformers/models/detr/modeling_detr.py +++ b/src/transformers/models/detr/modeling_detr.py @@ -273,7 +273,7 @@ class DetrFrozenBatchNorm2d(nn.Module): """ def __init__(self, n): - super(DetrFrozenBatchNorm2d, self).__init__() + super().__init__() self.register_buffer("weight", torch.ones(n)) self.register_buffer("bias", torch.zeros(n)) self.register_buffer("running_mean", torch.zeros(n)) @@ -286,7 +286,7 @@ def _load_from_state_dict( if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key] - super(DetrFrozenBatchNorm2d, self)._load_from_state_dict( + super()._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ) @@ -387,14 +387,14 @@ def forward(self, pixel_values, pixel_mask): return out, pos -def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): +def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, target_len: Optional[int] = None): """ - Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. + Expands attention_mask from `[batch_size, seq_len]` to `[batch_size, 1, target_seq_len, source_seq_len]`. """ - bsz, src_len = mask.size() - tgt_len = tgt_len if tgt_len is not None else src_len + batch_size, source_len = mask.size() + target_len = target_len if target_len is not None else source_len - expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + expanded_mask = mask[:, None, None, :].expand(batch_size, 1, target_len, source_len).to(dtype) inverted_mask = 1.0 - expanded_mask @@ -449,12 +449,12 @@ def __init__(self, embedding_dim=256): self.column_embeddings = nn.Embedding(50, embedding_dim) def forward(self, pixel_values, pixel_mask=None): - h, w = pixel_values.shape[-2:] - i = torch.arange(w, device=pixel_values.device) - j = torch.arange(h, device=pixel_values.device) - x_emb = self.column_embeddings(i) - y_emb = self.row_embeddings(j) - pos = torch.cat([x_emb.unsqueeze(0).repeat(h, 1, 1), y_emb.unsqueeze(1).repeat(1, w, 1)], dim=-1) + height, width = pixel_values.shape[-2:] + width_values = torch.arange(width, device=pixel_values.device) + height_values = torch.arange(height, device=pixel_values.device) + x_emb = self.column_embeddings(width_values) + y_emb = self.row_embeddings(height_values) + pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1) pos = pos.permute(2, 0, 1) pos = pos.unsqueeze(0) pos = pos.repeat(pixel_values.shape[0], 1, 1, 1) @@ -506,8 +506,8 @@ def __init__( self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) - def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): - return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): + return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): return tensor if position_embeddings is None else tensor + position_embeddings @@ -526,7 +526,7 @@ def forward( # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None - bsz, tgt_len, embed_dim = hidden_states.size() + batch_size, target_len, embed_dim = hidden_states.size() # add position embeddings to the hidden states before projecting to queries and keys if position_embeddings is not None: @@ -543,35 +543,36 @@ def forward( # get key, value proj if is_cross_attention: # cross_attentions - key_states = self._shape(self.k_proj(key_value_states), -1, bsz) - value_states = self._shape(self.v_proj(key_value_states_original), -1, bsz) + key_states = self._shape(self.k_proj(key_value_states), -1, batch_size) + value_states = self._shape(self.v_proj(key_value_states_original), -1, batch_size) else: # self_attention - key_states = self._shape(self.k_proj(hidden_states), -1, bsz) - value_states = self._shape(self.v_proj(hidden_states_original), -1, bsz) + key_states = self._shape(self.k_proj(hidden_states), -1, batch_size) + value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size) - proj_shape = (bsz * self.num_heads, -1, self.head_dim) - query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) + proj_shape = (batch_size * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) - src_len = key_states.size(1) + source_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) - if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len): raise ValueError( - f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: - if attention_mask.size() != (bsz, 1, tgt_len, src_len): + if attention_mask.size() != (batch_size, 1, target_len, source_len): raise ValueError( - f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" + f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is" + f" {attention_mask.size()}" ) - attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask - attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask + attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) @@ -580,8 +581,8 @@ def forward( # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to reshaped # twice and have to be reused in the following - attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) - attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) + attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len) else: attn_weights_reshaped = None @@ -589,15 +590,15 @@ def forward( attn_output = torch.bmm(attn_probs, value_states) - if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): + if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim): raise ValueError( - f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is" f" {attn_output.size()}" ) - attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) + attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim) attn_output = attn_output.transpose(1, 2) - attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) + attn_output = attn_output.reshape(batch_size, target_len, embed_dim) attn_output = self.out_proj(attn_output) @@ -632,7 +633,8 @@ def forward( Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size - `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative + values. position_embeddings (`torch.FloatTensor`, *optional*): position embeddings, to be added to hidden_states. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under @@ -714,7 +716,8 @@ def forward( Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size - `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative + values. position_embeddings (`torch.FloatTensor`, *optional*): position embeddings that are added to the queries and keys in the cross-attention layer. @@ -724,7 +727,8 @@ def forward( encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size - `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative + values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. @@ -957,7 +961,7 @@ def forward( # expand attention_mask if attention_mask is not None: - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None @@ -1081,15 +1085,17 @@ def forward( combined_attention_mask = None if attention_mask is not None and combined_attention_mask is not None: - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] combined_attention_mask = combined_attention_mask + _expand_mask( - attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] + attention_mask, inputs_embeds.dtype, target_len=input_shape[-1] ) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: - # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] - encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) + # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] + encoder_attention_mask = _expand_mask( + encoder_attention_mask, inputs_embeds.dtype, target_len=input_shape[-1] + ) # optional intermediate hidden states intermediate = () if self.config.auxiliary_loss else None @@ -1889,21 +1895,22 @@ def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: f Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. Args: - inputs: A float tensor of arbitrary shape. - The predictions for each example. - targets: A float tensor with the same shape as inputs. Stores the binary - classification label for each element in inputs (0 for the negative class and 1 for the positive - class). - alpha: (optional) Weighting factor in range (0,1) to balance - positive vs negative examples. Default = -1 (no weighting). - gamma: Exponent of the modulating factor (1 - p_t) to - balance easy vs hard examples. + inputs (`torch.FloatTensor` of arbitrary shape): + The predictions for each example. + targets (`torch.FloatTensor` with the same shape as `inputs`) + A tensor storing the binary classification label for each element in the `inputs` (0 for the negative class + and 1 for the positive class). + alpha (`float`, *optional*, defaults to `0.25`): + Optional weighting factor in the range (0,1) to balance positive vs. negative examples. + gamma (`int`, *optional*, defaults to `2`): + Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. Returns: Loss tensor """ prob = inputs.sigmoid() ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction="none") + # add modulating factor p_t = prob * targets + (1 - prob) * (1 - targets) loss = ce_loss * ((1 - p_t) ** gamma) @@ -1981,10 +1988,10 @@ def loss_cardinality(self, outputs, targets, indices, num_boxes): """ logits = outputs["logits"] device = logits.device - tgt_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device) + target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1) - card_err = nn.functional.l1_loss(card_pred.float(), tgt_lengths.float()) + card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float()) losses = {"cardinality_error": card_err} return losses @@ -2191,19 +2198,19 @@ def forward(self, outputs, targets): out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] # Also concat the target labels and boxes - tgt_ids = torch.cat([v["class_labels"] for v in targets]) - tgt_bbox = torch.cat([v["boxes"] for v in targets]) + target_ids = torch.cat([v["class_labels"] for v in targets]) + target_bbox = torch.cat([v["boxes"] for v in targets]) # Compute the classification cost. Contrary to the loss, we don't use the NLL, # but approximate it in 1 - proba[target class]. # The 1 is a constant that doesn't change the matching, it can be ommitted. - class_cost = -out_prob[:, tgt_ids] + class_cost = -out_prob[:, target_ids] # Compute the L1 cost between boxes - bbox_cost = torch.cdist(out_bbox, tgt_bbox, p=1) + bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) # Compute the giou cost between boxes - giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(tgt_bbox)) + giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) # Final cost matrix cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost @@ -2267,15 +2274,17 @@ def generalized_box_iou(boxes1, boxes2): """ # degenerate boxes gives inf / nan results # so do an early check - assert (boxes1[:, 2:] >= boxes1[:, :2]).all() - assert (boxes2[:, 2:] >= boxes2[:, :2]).all() + if not (boxes1[:, 2:] >= boxes1[:, :2]).all(): + raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}") + if not (boxes2[:, 2:] >= boxes2[:, :2]).all(): + raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}") iou, union = box_iou(boxes1, boxes2) - lt = torch.min(boxes1[:, None, :2], boxes2[:, :2]) - rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) + top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2]) + bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) - wh = (rb - lt).clamp(min=0) # [N,M,2] - area = wh[:, :, 0] * wh[:, :, 1] + width_height = (bottom_right - top_left).clamp(min=0) # [N,M,2] + area = width_height[:, :, 0] * width_height[:, :, 1] return iou - (area - union) / area @@ -2317,11 +2326,11 @@ def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): if tensor_list[0].ndim == 3: max_size = _max_by_axis([list(img.shape) for img in tensor_list]) batch_shape = [len(tensor_list)] + max_size - b, c, h, w = batch_shape + batch_size, num_channels, height, width = batch_shape dtype = tensor_list[0].dtype device = tensor_list[0].device tensor = torch.zeros(batch_shape, dtype=dtype, device=device) - mask = torch.ones((b, h, w), dtype=torch.bool, device=device) + mask = torch.ones((batch_size, height, width), dtype=torch.bool, device=device) for img, pad_img, m in zip(tensor_list, tensor, mask): pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) m[: img.shape[1], : img.shape[2]] = False diff --git a/src/transformers/models/maskformer/modeling_maskformer.py b/src/transformers/models/maskformer/modeling_maskformer.py index 1266dbfdad84ec..110b11c6532dd7 100644 --- a/src/transformers/models/maskformer/modeling_maskformer.py +++ b/src/transformers/models/maskformer/modeling_maskformer.py @@ -1211,8 +1211,8 @@ def __init__( self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) - def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): - return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): + return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): return tensor if position_embeddings is None else tensor + position_embeddings @@ -1231,7 +1231,7 @@ def forward( # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None - bsz, tgt_len, embed_dim = hidden_states.size() + batch_size, target_len, embed_dim = hidden_states.size() # add position embeddings to the hidden states before projecting to queries and keys if position_embeddings is not None: @@ -1248,35 +1248,36 @@ def forward( # get key, value proj if is_cross_attention: # cross_attentions - key_states = self._shape(self.k_proj(key_value_states), -1, bsz) - value_states = self._shape(self.v_proj(key_value_states_original), -1, bsz) + key_states = self._shape(self.k_proj(key_value_states), -1, batch_size) + value_states = self._shape(self.v_proj(key_value_states_original), -1, batch_size) else: # self_attention - key_states = self._shape(self.k_proj(hidden_states), -1, bsz) - value_states = self._shape(self.v_proj(hidden_states_original), -1, bsz) + key_states = self._shape(self.k_proj(hidden_states), -1, batch_size) + value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size) - proj_shape = (bsz * self.num_heads, -1, self.head_dim) - query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) + proj_shape = (batch_size * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) - src_len = key_states.size(1) + source_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) - if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len): raise ValueError( - f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: - if attention_mask.size() != (bsz, 1, tgt_len, src_len): + if attention_mask.size() != (batch_size, 1, target_len, source_len): raise ValueError( - f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" + f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is" + f" {attention_mask.size()}" ) - attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask - attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask + attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) @@ -1285,8 +1286,8 @@ def forward( # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to reshaped # twice and have to be reused in the following - attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) - attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) + attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len) else: attn_weights_reshaped = None @@ -1294,15 +1295,15 @@ def forward( attn_output = torch.bmm(attn_probs, value_states) - if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): + if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim): raise ValueError( - f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is" f" {attn_output.size()}" ) - attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) + attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim) attn_output = attn_output.transpose(1, 2) - attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) + attn_output = attn_output.reshape(batch_size, target_len, embed_dim) attn_output = self.out_proj(attn_output) @@ -1351,7 +1352,8 @@ def forward( Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size - `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative + values. position_embeddings (`torch.FloatTensor`, *optional*): position embeddings that are added to the queries and keys in the cross-attention layer. @@ -1361,7 +1363,8 @@ def forward( encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size - `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative + values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. @@ -1416,14 +1419,14 @@ def forward( # Copied from transformers.models.detr.modeling_detr._expand_mask -def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): +def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, target_len: Optional[int] = None): """ - Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. + Expands attention_mask from `[batch_size, seq_len]` to `[batch_size, 1, target_seq_len, source_seq_len]`. """ - bsz, src_len = mask.size() - tgt_len = tgt_len if tgt_len is not None else src_len + batch_size, source_len = mask.size() + target_len = target_len if target_len is not None else source_len - expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + expanded_mask = mask[:, None, None, :].expand(batch_size, 1, target_len, source_len).to(dtype) inverted_mask = 1.0 - expanded_mask diff --git a/src/transformers/models/yolos/modeling_yolos.py b/src/transformers/models/yolos/modeling_yolos.py index db70670b6c0334..d41e26c4e21c52 100755 --- a/src/transformers/models/yolos/modeling_yolos.py +++ b/src/transformers/models/yolos/modeling_yolos.py @@ -874,21 +874,22 @@ def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: f Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. Args: - inputs: A float tensor of arbitrary shape. - The predictions for each example. - targets: A float tensor with the same shape as inputs. Stores the binary - classification label for each element in inputs (0 for the negative class and 1 for the positive - class). - alpha: (optional) Weighting factor in range (0,1) to balance - positive vs negative examples. Default = -1 (no weighting). - gamma: Exponent of the modulating factor (1 - p_t) to - balance easy vs hard examples. + inputs (`torch.FloatTensor` of arbitrary shape): + The predictions for each example. + targets (`torch.FloatTensor` with the same shape as `inputs`) + A tensor storing the binary classification label for each element in the `inputs` (0 for the negative class + and 1 for the positive class). + alpha (`float`, *optional*, defaults to `0.25`): + Optional weighting factor in the range (0,1) to balance positive vs. negative examples. + gamma (`int`, *optional*, defaults to `2`): + Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. Returns: Loss tensor """ prob = inputs.sigmoid() ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction="none") + # add modulating factor p_t = prob * targets + (1 - prob) * (1 - targets) loss = ce_loss * ((1 - p_t) ** gamma) @@ -966,10 +967,10 @@ def loss_cardinality(self, outputs, targets, indices, num_boxes): """ logits = outputs["logits"] device = logits.device - tgt_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device) + target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1) - card_err = nn.functional.l1_loss(card_pred.float(), tgt_lengths.float()) + card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float()) losses = {"cardinality_error": card_err} return losses @@ -1176,19 +1177,19 @@ def forward(self, outputs, targets): out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] # Also concat the target labels and boxes - tgt_ids = torch.cat([v["class_labels"] for v in targets]) - tgt_bbox = torch.cat([v["boxes"] for v in targets]) + target_ids = torch.cat([v["class_labels"] for v in targets]) + target_bbox = torch.cat([v["boxes"] for v in targets]) # Compute the classification cost. Contrary to the loss, we don't use the NLL, # but approximate it in 1 - proba[target class]. # The 1 is a constant that doesn't change the matching, it can be ommitted. - class_cost = -out_prob[:, tgt_ids] + class_cost = -out_prob[:, target_ids] # Compute the L1 cost between boxes - bbox_cost = torch.cdist(out_bbox, tgt_bbox, p=1) + bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) # Compute the giou cost between boxes - giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(tgt_bbox)) + giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) # Final cost matrix cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost @@ -1252,15 +1253,17 @@ def generalized_box_iou(boxes1, boxes2): """ # degenerate boxes gives inf / nan results # so do an early check - assert (boxes1[:, 2:] >= boxes1[:, :2]).all() - assert (boxes2[:, 2:] >= boxes2[:, :2]).all() + if not (boxes1[:, 2:] >= boxes1[:, :2]).all(): + raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}") + if not (boxes2[:, 2:] >= boxes2[:, :2]).all(): + raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}") iou, union = box_iou(boxes1, boxes2) - lt = torch.min(boxes1[:, None, :2], boxes2[:, :2]) - rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) + top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2]) + bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) - wh = (rb - lt).clamp(min=0) # [N,M,2] - area = wh[:, :, 0] * wh[:, :, 1] + width_height = (bottom_right - top_left).clamp(min=0) # [N,M,2] + area = width_height[:, :, 0] * width_height[:, :, 1] return iou - (area - union) / area @@ -1302,11 +1305,11 @@ def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): if tensor_list[0].ndim == 3: max_size = _max_by_axis([list(img.shape) for img in tensor_list]) batch_shape = [len(tensor_list)] + max_size - b, c, h, w = batch_shape + batch_size, num_channels, height, width = batch_shape dtype = tensor_list[0].dtype device = tensor_list[0].device tensor = torch.zeros(batch_shape, dtype=dtype, device=device) - mask = torch.ones((b, h, w), dtype=torch.bool, device=device) + mask = torch.ones((batch_size, height, width), dtype=torch.bool, device=device) for img, pad_img, m in zip(tensor_list, tensor, mask): pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) m[: img.shape[1], : img.shape[2]] = False diff --git a/src/transformers/utils/dummy_timm_and_vision_objects.py b/src/transformers/utils/dummy_timm_and_vision_objects.py index 9a631966370ad0..e990c33d2d317a 100644 --- a/src/transformers/utils/dummy_timm_and_vision_objects.py +++ b/src/transformers/utils/dummy_timm_and_vision_objects.py @@ -3,6 +3,30 @@ from ..utils import DummyObject, requires_backends +DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class DeformableDetrForObjectDetection(metaclass=DummyObject): + _backends = ["timm", "vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["timm", "vision"]) + + +class DeformableDetrModel(metaclass=DummyObject): + _backends = ["timm", "vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["timm", "vision"]) + + +class DeformableDetrPreTrainedModel(metaclass=DummyObject): + _backends = ["timm", "vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["timm", "vision"]) + + DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/src/transformers/utils/dummy_timm_objects.py b/src/transformers/utils/dummy_timm_objects.py deleted file mode 100644 index c964d4031555be..00000000000000 --- a/src/transformers/utils/dummy_timm_objects.py +++ /dev/null @@ -1,32 +0,0 @@ -# This file is autogenerated by the command `make fix-copies`, do not edit. -from ..utils import requires_backends - - -DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None - - -class DetrForObjectDetection: - def __init__(self, *args, **kwargs): - requires_backends(self, ["timm"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["timm"]) - - -class DetrForSegmentation: - def __init__(self, *args, **kwargs): - requires_backends(self, ["timm"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["timm"]) - - -class DetrModel: - def __init__(self, *args, **kwargs): - requires_backends(self, ["timm"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["timm"]) diff --git a/tests/models/deformable_detr/__init__.py b/tests/models/deformable_detr/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/tests/models/deformable_detr/test_modeling_deformable_detr.py b/tests/models/deformable_detr/test_modeling_deformable_detr.py new file mode 100644 index 00000000000000..2c5bd4eda01e1c --- /dev/null +++ b/tests/models/deformable_detr/test_modeling_deformable_detr.py @@ -0,0 +1,625 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Testing suite for the PyTorch Deformable DETR model. """ + + +import inspect +import math +import unittest +from typing import Dict, List, Tuple + +from transformers import DeformableDetrConfig, is_timm_available, is_vision_available +from transformers.file_utils import cached_property +from transformers.testing_utils import require_timm, require_torch_gpu, require_vision, slow, torch_device + +from ...generation.test_generation_utils import GenerationTesterMixin +from ...test_configuration_common import ConfigTester +from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor + + +if is_timm_available(): + import torch + + from transformers import DeformableDetrForObjectDetection, DeformableDetrModel + + +if is_vision_available(): + from PIL import Image + + from transformers import AutoFeatureExtractor + + +class DeformableDetrModelTester: + def __init__( + self, + parent, + batch_size=8, + is_training=True, + use_labels=True, + hidden_size=256, + num_hidden_layers=2, + num_attention_heads=8, + intermediate_size=4, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + num_queries=12, + num_channels=3, + image_size=196, + n_targets=8, + num_labels=91, + num_feature_levels=4, + encoder_n_points=2, + decoder_n_points=6, + ): + self.parent = parent + self.batch_size = batch_size + self.is_training = is_training + self.use_labels = use_labels + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.num_queries = num_queries + self.num_channels = num_channels + self.image_size = image_size + self.n_targets = n_targets + self.num_labels = num_labels + self.num_feature_levels = num_feature_levels + self.encoder_n_points = encoder_n_points + self.decoder_n_points = decoder_n_points + + # we also set the expected seq length for both encoder and decoder + self.encoder_seq_length = ( + math.ceil(self.image_size / 8) ** 2 + + math.ceil(self.image_size / 16) ** 2 + + math.ceil(self.image_size / 32) ** 2 + + math.ceil(self.image_size / 64) ** 2 + ) + self.decoder_seq_length = self.num_queries + + def prepare_config_and_inputs(self): + pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) + + pixel_mask = torch.ones([self.batch_size, self.image_size, self.image_size], device=torch_device) + + labels = None + if self.use_labels: + # labels is a list of Dict (each Dict being the labels for a given example in the batch) + labels = [] + for i in range(self.batch_size): + target = {} + target["class_labels"] = torch.randint( + high=self.num_labels, size=(self.n_targets,), device=torch_device + ) + target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device) + target["masks"] = torch.rand(self.n_targets, self.image_size, self.image_size, device=torch_device) + labels.append(target) + + config = self.get_config() + return config, pixel_values, pixel_mask, labels + + def get_config(self): + return DeformableDetrConfig( + d_model=self.hidden_size, + encoder_layers=self.num_hidden_layers, + decoder_layers=self.num_hidden_layers, + encoder_attention_heads=self.num_attention_heads, + decoder_attention_heads=self.num_attention_heads, + encoder_ffn_dim=self.intermediate_size, + decoder_ffn_dim=self.intermediate_size, + dropout=self.hidden_dropout_prob, + attention_dropout=self.attention_probs_dropout_prob, + num_queries=self.num_queries, + num_labels=self.num_labels, + num_feature_levels=self.num_feature_levels, + encoder_n_points=self.encoder_n_points, + decoder_n_points=self.decoder_n_points, + ) + + def prepare_config_and_inputs_for_common(self): + config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs() + inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} + return config, inputs_dict + + def create_and_check_deformable_detr_model(self, config, pixel_values, pixel_mask, labels): + model = DeformableDetrModel(config=config) + model.to(torch_device) + model.eval() + + result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) + result = model(pixel_values) + + self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_size)) + + def create_and_check_deformable_detr_object_detection_head_model(self, config, pixel_values, pixel_mask, labels): + model = DeformableDetrForObjectDetection(config=config) + model.to(torch_device) + model.eval() + + result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) + result = model(pixel_values) + + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) + self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) + + result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels) + + self.parent.assertEqual(result.loss.shape, ()) + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) + self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) + + +@require_timm +class DeformableDetrModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): + all_model_classes = (DeformableDetrModel, DeformableDetrForObjectDetection) if is_timm_available() else () + is_encoder_decoder = True + test_torchscript = False + test_pruning = False + test_head_masking = False + test_missing_keys = False + + # special case for head models + def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): + inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) + + if return_labels: + if model_class.__name__ == "DeformableDetrForObjectDetection": + labels = [] + for i in range(self.model_tester.batch_size): + target = {} + target["class_labels"] = torch.ones( + size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long + ) + target["boxes"] = torch.ones( + self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float + ) + target["masks"] = torch.ones( + self.model_tester.n_targets, + self.model_tester.image_size, + self.model_tester.image_size, + device=torch_device, + dtype=torch.float, + ) + labels.append(target) + inputs_dict["labels"] = labels + + return inputs_dict + + def setUp(self): + self.model_tester = DeformableDetrModelTester(self) + self.config_tester = ConfigTester(self, config_class=DeformableDetrConfig, has_text_modality=False) + + def test_config(self): + # we don't test common_properties and arguments_init as these don't apply for Deformable DETR + self.config_tester.create_and_test_config_to_json_string() + self.config_tester.create_and_test_config_to_json_file() + self.config_tester.create_and_test_config_from_and_save_pretrained() + self.config_tester.create_and_test_config_with_num_labels() + self.config_tester.check_config_can_be_init_without_params() + + def test_deformable_detr_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_deformable_detr_model(*config_and_inputs) + + def test_deformable_detr_object_detection_head_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_deformable_detr_object_detection_head_model(*config_and_inputs) + + @unittest.skip(reason="Deformable DETR does not use inputs_embeds") + def test_inputs_embeds(self): + pass + + @unittest.skip(reason="Deformable DETR does not have a get_input_embeddings method") + def test_model_common_attributes(self): + pass + + @unittest.skip(reason="Deformable DETR is not a generative model") + def test_generate_without_input_ids(self): + pass + + @unittest.skip(reason="Deformable DETR does not use token embeddings") + def test_resize_tokens_embeddings(self): + pass + + @unittest.skip(reason="Feed forward chunking is not implemented") + def test_feed_forward_chunking(self): + pass + + def test_attention_outputs(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + config.return_dict = True + + for model_class in self.all_model_classes: + inputs_dict["output_attentions"] = True + inputs_dict["output_hidden_states"] = False + config.return_dict = True + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + attentions = outputs.encoder_attentions + self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) + + # check that output_attentions also work using config + del inputs_dict["output_attentions"] + config.output_attentions = True + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + attentions = outputs.encoder_attentions + self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) + + self.assertListEqual( + list(attentions[0].shape[-3:]), + [ + self.model_tester.num_attention_heads, + self.model_tester.num_feature_levels, + self.model_tester.encoder_n_points, + ], + ) + out_len = len(outputs) + + correct_outlen = 8 + + # loss is at first position + if "labels" in inputs_dict: + correct_outlen += 1 # loss is added to beginning + # Object Detection model returns pred_logits and pred_boxes + if model_class.__name__ == "DeformableDetrForObjectDetection": + correct_outlen += 2 + + self.assertEqual(out_len, correct_outlen) + + # decoder attentions + decoder_attentions = outputs.decoder_attentions + self.assertIsInstance(decoder_attentions, (list, tuple)) + self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) + self.assertListEqual( + list(decoder_attentions[0].shape[-3:]), + [self.model_tester.num_attention_heads, self.model_tester.num_queries, self.model_tester.num_queries], + ) + + # cross attentions + cross_attentions = outputs.cross_attentions + self.assertIsInstance(cross_attentions, (list, tuple)) + self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) + self.assertListEqual( + list(cross_attentions[0].shape[-3:]), + [ + self.model_tester.num_attention_heads, + self.model_tester.num_feature_levels, + self.model_tester.decoder_n_points, + ], + ) + + # Check attention is always last and order is fine + inputs_dict["output_attentions"] = True + inputs_dict["output_hidden_states"] = True + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + if hasattr(self.model_tester, "num_hidden_states_types"): + added_hidden_states = self.model_tester.num_hidden_states_types + elif self.is_encoder_decoder: + added_hidden_states = 2 + else: + added_hidden_states = 1 + self.assertEqual(out_len + added_hidden_states, len(outputs)) + + self_attentions = outputs.encoder_attentions + + self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) + self.assertListEqual( + list(self_attentions[0].shape[-3:]), + [ + self.model_tester.num_attention_heads, + self.model_tester.num_feature_levels, + self.model_tester.encoder_n_points, + ], + ) + + def test_model_outputs_equivalence(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + def set_nan_tensor_to_zero(t): + t[t != t] = 0 + return t + + def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): + with torch.no_grad(): + tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) + dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() + + def recursive_check(tuple_object, dict_object): + if isinstance(tuple_object, (List, Tuple)): + for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): + recursive_check(tuple_iterable_value, dict_iterable_value) + elif isinstance(tuple_object, Dict): + for tuple_iterable_value, dict_iterable_value in zip( + tuple_object.values(), dict_object.values() + ): + recursive_check(tuple_iterable_value, dict_iterable_value) + elif tuple_object is None: + return + else: + self.assertTrue( + torch.allclose( + set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 + ), + msg=( + "Tuple and dict output are not equal. Difference:" + f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" + f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" + f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." + ), + ) + + recursive_check(tuple_output, dict_output) + + for model_class in self.all_model_classes: + print("Model class:", model_class) + model = model_class(config) + model.to(torch_device) + model.eval() + + tuple_inputs = self._prepare_for_class(inputs_dict, model_class) + dict_inputs = self._prepare_for_class(inputs_dict, model_class) + check_equivalence(model, tuple_inputs, dict_inputs) + + tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) + dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) + check_equivalence(model, tuple_inputs, dict_inputs) + + tuple_inputs = self._prepare_for_class(inputs_dict, model_class) + dict_inputs = self._prepare_for_class(inputs_dict, model_class) + check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) + + tuple_inputs = self._prepare_for_class(inputs_dict, model_class) + dict_inputs = self._prepare_for_class(inputs_dict, model_class) + check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) + + tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) + dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) + check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) + + tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) + dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) + check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) + + tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) + dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) + check_equivalence( + model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True} + ) + + def test_retain_grad_hidden_states_attentions(self): + # removed retain_grad and grad on decoder_hidden_states, as queries don't require grad + + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + config.output_hidden_states = True + config.output_attentions = True + + # no need to test all models as different heads yield the same functionality + model_class = self.all_model_classes[0] + model = model_class(config) + model.to(torch_device) + + inputs = self._prepare_for_class(inputs_dict, model_class) + + outputs = model(**inputs) + + # we take the second output since last_hidden_state is the second item + output = outputs[1] + + encoder_hidden_states = outputs.encoder_hidden_states[0] + encoder_attentions = outputs.encoder_attentions[0] + encoder_hidden_states.retain_grad() + encoder_attentions.retain_grad() + + decoder_attentions = outputs.decoder_attentions[0] + decoder_attentions.retain_grad() + + cross_attentions = outputs.cross_attentions[0] + cross_attentions.retain_grad() + + output.flatten()[0].backward(retain_graph=True) + + self.assertIsNotNone(encoder_hidden_states.grad) + self.assertIsNotNone(encoder_attentions.grad) + self.assertIsNotNone(decoder_attentions.grad) + self.assertIsNotNone(cross_attentions.grad) + + def test_forward_signature(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + signature = inspect.signature(model.forward) + # signature.parameters is an OrderedDict => so arg_names order is deterministic + arg_names = [*signature.parameters.keys()] + + if model.config.is_encoder_decoder: + expected_arg_names = ["pixel_values", "pixel_mask"] + expected_arg_names.extend( + ["head_mask", "decoder_head_mask", "encoder_outputs"] + if "head_mask" and "decoder_head_mask" in arg_names + else [] + ) + self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) + else: + expected_arg_names = ["pixel_values", "pixel_mask"] + self.assertListEqual(arg_names[:1], expected_arg_names) + + def test_different_timm_backbone(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + # let's pick a random timm backbone + config.backbone = "tf_mobilenetv3_small_075" + + for model_class in self.all_model_classes: + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + if model_class.__name__ == "DeformableDetrForObjectDetection": + expected_shape = ( + self.model_tester.batch_size, + self.model_tester.num_queries, + self.model_tester.num_labels, + ) + self.assertEqual(outputs.logits.shape, expected_shape) + + self.assertTrue(outputs) + + def test_initialization(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + configs_no_init = _config_zero_init(config) + for model_class in self.all_model_classes: + print("Model class:", model_class) + model = model_class(config=configs_no_init) + for name, param in model.named_parameters(): + if param.requires_grad: + if param.requires_grad: + if ( + "level_embed" in name + or "sampling_offsets.bias" in name + or "value_proj" in name + or "output_proj" in name + or "reference_points" in name + ): + continue + self.assertIn( + ((param.data.mean() * 1e9).round() / 1e9).item(), + [0.0, 1.0], + msg=f"Parameter {name} of model {model_class} seems not properly initialized", + ) + + +TOLERANCE = 1e-4 + + +# We will verify our results on an image of cute cats +def prepare_img(): + image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") + return image + + +@require_timm +@require_vision +@slow +class DeformableDetrModelIntegrationTests(unittest.TestCase): + @cached_property + def default_feature_extractor(self): + return AutoFeatureExtractor.from_pretrained("SenseTime/deformable-detr") if is_vision_available() else None + + def test_inference_object_detection_head(self): + model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr").to(torch_device) + + feature_extractor = self.default_feature_extractor + image = prepare_img() + encoding = feature_extractor(images=image, return_tensors="pt").to(torch_device) + pixel_values = encoding["pixel_values"].to(torch_device) + pixel_mask = encoding["pixel_mask"].to(torch_device) + + with torch.no_grad(): + outputs = model(pixel_values, pixel_mask) + + expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels)) + self.assertEqual(outputs.logits.shape, expected_shape_logits) + + expected_logits = torch.tensor( + [[-9.6645, -4.3449, -5.8705], [-9.7035, -3.8504, -5.0724], [-10.5634, -5.3379, -7.5116]] + ).to(torch_device) + expected_boxes = torch.tensor( + [[0.8693, 0.2289, 0.2492], [0.3150, 0.5489, 0.5845], [0.5563, 0.7580, 0.8518]] + ).to(torch_device) + + self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)) + + expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) + self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) + self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)) + + def test_inference_object_detection_head_with_box_refine_two_stage(self): + model = DeformableDetrForObjectDetection.from_pretrained( + "SenseTime/deformable-detr-with-box-refine-two-stage" + ).to(torch_device) + + feature_extractor = self.default_feature_extractor + image = prepare_img() + encoding = feature_extractor(images=image, return_tensors="pt").to(torch_device) + pixel_values = encoding["pixel_values"].to(torch_device) + pixel_mask = encoding["pixel_mask"].to(torch_device) + + with torch.no_grad(): + outputs = model(pixel_values, pixel_mask) + + expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels)) + self.assertEqual(outputs.logits.shape, expected_shape_logits) + + expected_logits = torch.tensor( + [[-6.7108, -4.3213, -6.3777], [-8.9014, -6.1799, -6.7240], [-6.9315, -4.4735, -6.2298]] + ).to(torch_device) + expected_boxes = torch.tensor( + [[0.2583, 0.5499, 0.4683], [0.7652, 0.9068, 0.4882], [0.5490, 0.2763, 0.0564]] + ).to(torch_device) + + self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)) + + expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) + self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) + self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)) + + @require_torch_gpu + def test_inference_object_detection_head_equivalence_cpu_gpu(self): + feature_extractor = self.default_feature_extractor + image = prepare_img() + encoding = feature_extractor(images=image, return_tensors="pt") + pixel_values = encoding["pixel_values"] + pixel_mask = encoding["pixel_mask"] + + # 1. run model on CPU + model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr-single-scale") + + with torch.no_grad(): + cpu_outputs = model(pixel_values, pixel_mask) + + # 2. run model on GPU + model.to("cuda") + + with torch.no_grad(): + gpu_outputs = model(pixel_values.to("cuda"), pixel_mask.to("cuda")) + + # 3. assert equivalence + for key in cpu_outputs.keys(): + assert torch.allclose(cpu_outputs[key], gpu_outputs[key].cpu(), atol=1e-4) + + expected_logits = torch.tensor( + [[-9.9051, -4.2541, -6.4852], [-9.6947, -4.0854, -6.8033], [-10.0665, -5.8470, -7.7003]] + ) + assert torch.allclose(cpu_outputs.logits[0, :3, :3], expected_logits, atol=1e-4) diff --git a/tests/pipelines/test_pipelines_object_detection.py b/tests/pipelines/test_pipelines_object_detection.py index 538f3131515791..b1d43f8a8795c6 100644 --- a/tests/pipelines/test_pipelines_object_detection.py +++ b/tests/pipelines/test_pipelines_object_detection.py @@ -53,6 +53,12 @@ class ObjectDetectionPipelineTests(unittest.TestCase, metaclass=PipelineTestCase model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING def get_test_pipeline(self, model, tokenizer, feature_extractor): + if model.__class__.__name__ == "DeformableDetrForObjectDetection": + self.skipTest( + """Deformable DETR requires a custom CUDA kernel. + """ + ) + object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] diff --git a/utils/check_repo.py b/utils/check_repo.py index 207592c91c961a..ea3b997f1f1261 100644 --- a/utils/check_repo.py +++ b/utils/check_repo.py @@ -46,6 +46,8 @@ # Being in this list is an exception and should **not** be the rule. IGNORE_NON_TESTED = PRIVATE_MODELS.copy() + [ # models to ignore for not tested + "DeformableDetrEncoder", # Building part of bigger (tested) model. + "DeformableDetrDecoder", # Building part of bigger (tested) model. "OPTDecoder", # Building part of bigger (tested) model. "DecisionTransformerGPT2Model", # Building part of bigger (tested) model. "SegformerDecodeHead", # Building part of bigger (tested) model. diff --git a/utils/documentation_tests.txt b/utils/documentation_tests.txt index abe539cf4613b5..0a27bdb527fc94 100644 --- a/utils/documentation_tests.txt +++ b/utils/documentation_tests.txt @@ -28,6 +28,7 @@ src/transformers/models/data2vec/modeling_data2vec_audio.py src/transformers/models/data2vec/modeling_data2vec_vision.py src/transformers/models/deberta/modeling_deberta.py src/transformers/models/deberta_v2/modeling_deberta_v2.py +src/transformers/models/deformable_detr/modeling_deformable_detr.py src/transformers/models/deit/modeling_deit.py src/transformers/models/deit/modeling_tf_deit.py src/transformers/models/detr/modeling_detr.py From 9f4acd059f9c2a195a3ff71c5bc34cb5512b0446 Mon Sep 17 00:00:00 2001 From: Ekagra Ranjan Date: Wed, 14 Sep 2022 15:36:29 +0530 Subject: [PATCH 282/539] Generate: add missing comments after refactoring of generate() (#18981) --- src/transformers/generation_utils.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/transformers/generation_utils.py b/src/transformers/generation_utils.py index 84f1a6f0392a38..b0e7b1411626e9 100644 --- a/src/transformers/generation_utils.py +++ b/src/transformers/generation_utils.py @@ -2240,6 +2240,8 @@ def beam_search( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) + # initialise score of first beam with 0 and the rest with -1e9. This makes sure that only tokens + # of the first beam are considered to avoid sampling the exact same tokens across all beams. beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) beam_scores[:, 1:] = -1e9 beam_scores = beam_scores.view((batch_size * num_beams,)) @@ -2303,6 +2305,7 @@ def beam_search( vocab_size = next_token_scores.shape[-1] next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) + # Sample 2 next tokens for each beam (so we have some spare tokens and match output of beam search) next_token_scores, next_tokens = torch.topk( next_token_scores, 2 * num_beams, dim=1, largest=True, sorted=True ) @@ -2873,9 +2876,9 @@ def group_beam_search( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) - beam_scores = torch.full((batch_size, num_beams), -1e9, dtype=torch.float, device=device) - # initialise score of first beam of each group with 0 and the rest with 1e-9. This ensures that the beams in + # initialise score of first beam of each group with 0 and the rest with -1e9. This ensures that the beams in # the same group don't produce same tokens everytime. + beam_scores = torch.full((batch_size, num_beams), -1e9, dtype=torch.float, device=device) beam_scores[:, ::num_sub_beams] = 0 beam_scores = beam_scores.view((batch_size * num_beams,)) @@ -2951,6 +2954,7 @@ def group_beam_search( # reshape for beam search next_token_scores = next_token_scores.view(batch_size, group_size * vocab_size) + # Sample 2 next tokens for each beam (so we have some spare tokens and match output of beam search) next_token_scores, next_tokens = torch.topk( next_token_scores, 2 * group_size, dim=1, largest=True, sorted=True ) @@ -3235,6 +3239,8 @@ def constrained_beam_search( f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}." ) + # initialise score of first beam with 0 and the rest with -1e9. This makes sure that only tokens + # of the first beam are considered to avoid sampling the exact same tokens across all beams. beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) beam_scores[:, 1:] = -1e9 beam_scores = beam_scores.view((batch_size * num_beams,)) @@ -3301,6 +3307,7 @@ def constrained_beam_search( vocab_size = next_token_scores.shape[-1] next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) + # Sample 2 next tokens for each beam (so we have some spare tokens and match output of beam search) next_token_scores, next_tokens = torch.topk( next_token_scores, 2 * num_beams, dim=1, largest=True, sorted=True ) From a2a3afbc8d26d6170909365ffba6bd75e186255f Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Wed, 14 Sep 2022 07:19:02 -0400 Subject: [PATCH 283/539] PyTorch >= 1.7.0 and TensorFlow >= 2.4.0 (#19016) --- setup.py | 4 +-- src/transformers/activations.py | 14 ++------- src/transformers/dependency_versions_table.py | 4 +-- .../models/albert/modeling_albert.py | 16 +++------- src/transformers/models/bert/modeling_bert.py | 16 +++------- .../models/big_bird/modeling_big_bird.py | 11 +++---- .../models/convbert/modeling_convbert.py | 16 +++------- .../models/data2vec/modeling_data2vec_text.py | 16 +++------- .../modeling_decision_transformer.py | 25 +++------------ .../models/distilbert/modeling_distilbert.py | 14 +++------ .../models/electra/modeling_electra.py | 16 +++------- .../models/ernie/modeling_ernie.py | 16 +++------- .../models/flaubert/modeling_flaubert.py | 8 ++--- .../models/flava/modeling_flava.py | 10 ++---- src/transformers/models/fnet/modeling_fnet.py | 11 +++---- src/transformers/models/gpt2/modeling_gpt2.py | 23 ++------------ .../models/imagegpt/modeling_imagegpt.py | 23 ++------------ .../models/mctct/modeling_mctct.py | 12 +++---- .../models/nezha/modeling_nezha.py | 16 +++------- .../nystromformer/modeling_nystromformer.py | 18 ++++------- .../models/qdqbert/modeling_qdqbert.py | 11 +++---- .../models/realm/modeling_realm.py | 16 +++------- .../models/roberta/modeling_roberta.py | 16 +++------- src/transformers/models/vilt/modeling_vilt.py | 16 +++------- .../xlm_roberta_xl/modeling_xlm_roberta_xl.py | 16 +++------- src/transformers/models/yoso/modeling_yoso.py | 18 ++++------- src/transformers/pytorch_utils.py | 3 +- src/transformers/trainer.py | 31 +++---------------- src/transformers/trainer_pt_utils.py | 8 +---- ...ng_{{cookiecutter.lowercase_modelname}}.py | 12 +++---- 30 files changed, 111 insertions(+), 325 deletions(-) diff --git a/setup.py b/setup.py index 84bd8f5d6eef50..68a62b785d2f04 100644 --- a/setup.py +++ b/setup.py @@ -155,13 +155,13 @@ "librosa", "starlette", "tensorflow-cpu>=2.3", - "tensorflow>=2.3", + "tensorflow>=2.4", "tensorflow-text", "tf2onnx", "timeout-decorator", "timm", "tokenizers>=0.11.1,!=0.11.3,<0.13", - "torch>=1.0,!=0.12.0", + "torch>=1.7,!=1.12.0", "torchaudio", "pyctcdecode>=0.3.0", "tqdm>=4.27", diff --git a/src/transformers/activations.py b/src/transformers/activations.py index 5d413bba728b7b..f7c9046134cf52 100644 --- a/src/transformers/activations.py +++ b/src/transformers/activations.py @@ -44,7 +44,7 @@ class GELUActivation(nn.Module): def __init__(self, use_gelu_python: bool = False): super().__init__() - if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.4") or use_gelu_python: + if use_gelu_python: self.act = self._gelu_python else: self.act = nn.functional.gelu @@ -108,18 +108,8 @@ class SiLUActivation(nn.Module): later. """ - def __init__(self): - super().__init__() - if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.7"): - self.act = self._silu_python - else: - self.act = nn.functional.silu - - def _silu_python(self, input: Tensor) -> Tensor: - return input * torch.sigmoid(input) - def forward(self, input: Tensor) -> Tensor: - return self.act(input) + return nn.functional.silu(input) class MishActivation(nn.Module): diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py index 434b87048405f6..74c6d00c2a5885 100644 --- a/src/transformers/dependency_versions_table.py +++ b/src/transformers/dependency_versions_table.py @@ -61,13 +61,13 @@ "librosa": "librosa", "starlette": "starlette", "tensorflow-cpu": "tensorflow-cpu>=2.3", - "tensorflow": "tensorflow>=2.3", + "tensorflow": "tensorflow>=2.4", "tensorflow-text": "tensorflow-text", "tf2onnx": "tf2onnx", "timeout-decorator": "timeout-decorator", "timm": "timm", "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.13", - "torch": "torch>=1.0,!=0.12.0", + "torch": "torch>=1.7,!=1.12.0", "torchaudio": "torchaudio", "pyctcdecode": "pyctcdecode>=0.3.0", "tqdm": "tqdm>=4.27", diff --git a/src/transformers/models/albert/modeling_albert.py b/src/transformers/models/albert/modeling_albert.py index 78df7911a2a0c4..95234259a9baeb 100755 --- a/src/transformers/models/albert/modeling_albert.py +++ b/src/transformers/models/albert/modeling_albert.py @@ -34,12 +34,7 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import ( - apply_chunking_to_forward, - find_pruneable_heads_and_indices, - is_torch_greater_than_1_6, - prune_linear_layer, -) +from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( ModelOutput, add_code_sample_docstrings, @@ -216,12 +211,9 @@ def __init__(self, config: AlbertConfig): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") - if is_torch_greater_than_1_6: - self.register_buffer( - "token_type_ids", - torch.zeros(self.position_ids.size(), dtype=torch.long), - persistent=False, - ) + self.register_buffer( + "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False + ) # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.forward def forward( diff --git a/src/transformers/models/bert/modeling_bert.py b/src/transformers/models/bert/modeling_bert.py index 495bbe2e49a9cb..11664f66cba8e7 100755 --- a/src/transformers/models/bert/modeling_bert.py +++ b/src/transformers/models/bert/modeling_bert.py @@ -40,12 +40,7 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import ( - apply_chunking_to_forward, - find_pruneable_heads_and_indices, - is_torch_greater_than_1_6, - prune_linear_layer, -) +from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( ModelOutput, add_code_sample_docstrings, @@ -199,12 +194,9 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - if is_torch_greater_than_1_6: - self.register_buffer( - "token_type_ids", - torch.zeros(self.position_ids.size(), dtype=torch.long), - persistent=False, - ) + self.register_buffer( + "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False + ) def forward( self, diff --git a/src/transformers/models/big_bird/modeling_big_bird.py b/src/transformers/models/big_bird/modeling_big_bird.py index fb30671927f469..8b330397db0dc0 100755 --- a/src/transformers/models/big_bird/modeling_big_bird.py +++ b/src/transformers/models/big_bird/modeling_big_bird.py @@ -37,7 +37,7 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import apply_chunking_to_forward, is_torch_greater_than_1_6 +from ...pytorch_utils import apply_chunking_to_forward from ...utils import ( ModelOutput, add_code_sample_docstrings, @@ -259,12 +259,9 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - if is_torch_greater_than_1_6: - self.register_buffer( - "token_type_ids", - torch.zeros(self.position_ids.size(), dtype=torch.long), - persistent=False, - ) + self.register_buffer( + "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False + ) # End copy self.rescale_embeddings = config.rescale_embeddings diff --git a/src/transformers/models/convbert/modeling_convbert.py b/src/transformers/models/convbert/modeling_convbert.py index 136685ad6c1ce1..17978006d1236d 100755 --- a/src/transformers/models/convbert/modeling_convbert.py +++ b/src/transformers/models/convbert/modeling_convbert.py @@ -35,12 +35,7 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel, SequenceSummary -from ...pytorch_utils import ( - apply_chunking_to_forward, - find_pruneable_heads_and_indices, - is_torch_greater_than_1_6, - prune_linear_layer, -) +from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_convbert import ConvBertConfig @@ -198,12 +193,9 @@ def __init__(self, config): self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - if is_torch_greater_than_1_6: - self.register_buffer( - "token_type_ids", - torch.zeros(self.position_ids.size(), dtype=torch.long), - persistent=False, - ) + self.register_buffer( + "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False + ) def forward( self, diff --git a/src/transformers/models/data2vec/modeling_data2vec_text.py b/src/transformers/models/data2vec/modeling_data2vec_text.py index 8a7d6308bf5744..56b78d90be63ee 100644 --- a/src/transformers/models/data2vec/modeling_data2vec_text.py +++ b/src/transformers/models/data2vec/modeling_data2vec_text.py @@ -34,12 +34,7 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import ( - apply_chunking_to_forward, - find_pruneable_heads_and_indices, - is_torch_greater_than_1_6, - prune_linear_layer, -) +from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( add_code_sample_docstrings, add_start_docstrings, @@ -87,12 +82,9 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - if is_torch_greater_than_1_6: - self.register_buffer( - "token_type_ids", - torch.zeros(self.position_ids.size(), dtype=torch.long), - persistent=False, - ) + self.register_buffer( + "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False + ) # End copy self.padding_idx = config.pad_token_id diff --git a/src/transformers/models/decision_transformer/modeling_decision_transformer.py b/src/transformers/models/decision_transformer/modeling_decision_transformer.py index 77804e75547770..723a6139cca1f5 100755 --- a/src/transformers/models/decision_transformer/modeling_decision_transformer.py +++ b/src/transformers/models/decision_transformer/modeling_decision_transformer.py @@ -22,15 +22,12 @@ import torch import torch.utils.checkpoint from torch import nn +from torch.cuda.amp import autocast from ...activations import ACT2FN +from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import ( - Conv1D, - find_pruneable_heads_and_indices, - is_torch_greater_or_equal_than_1_6, - prune_conv1d_layer, -) +from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer from ...utils import ( ModelOutput, add_start_docstrings, @@ -38,15 +35,6 @@ logging, replace_return_docstrings, ) - - -if is_torch_greater_or_equal_than_1_6: - is_amp_available = True - from torch.cuda.amp import autocast -else: - is_amp_available = False - -from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions from .configuration_decision_transformer import DecisionTransformerConfig @@ -235,12 +223,7 @@ def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, hea scale_factor /= float(self.layer_idx + 1) # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk)) - if is_amp_available: - with autocast(enabled=False): - q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len) - attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor) - attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len) - else: + with autocast(enabled=False): q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len) attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor) attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len) diff --git a/src/transformers/models/distilbert/modeling_distilbert.py b/src/transformers/models/distilbert/modeling_distilbert.py index 1282788a57dd23..a2713128e901a3 100755 --- a/src/transformers/models/distilbert/modeling_distilbert.py +++ b/src/transformers/models/distilbert/modeling_distilbert.py @@ -39,12 +39,7 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import ( - apply_chunking_to_forward, - find_pruneable_heads_and_indices, - is_torch_greater_than_1_6, - prune_linear_layer, -) +from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( add_code_sample_docstrings, add_start_docstrings, @@ -106,10 +101,9 @@ def __init__(self, config: PretrainedConfig): self.LayerNorm = nn.LayerNorm(config.dim, eps=1e-12) self.dropout = nn.Dropout(config.dropout) - if is_torch_greater_than_1_6: - self.register_buffer( - "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False - ) + self.register_buffer( + "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False + ) def forward(self, input_ids: torch.Tensor) -> torch.Tensor: """ diff --git a/src/transformers/models/electra/modeling_electra.py b/src/transformers/models/electra/modeling_electra.py index c215256b3e5f4e..5eee312684cf4a 100644 --- a/src/transformers/models/electra/modeling_electra.py +++ b/src/transformers/models/electra/modeling_electra.py @@ -36,12 +36,7 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel, SequenceSummary -from ...pytorch_utils import ( - apply_chunking_to_forward, - find_pruneable_heads_and_indices, - is_torch_greater_than_1_6, - prune_linear_layer, -) +from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( ModelOutput, add_code_sample_docstrings, @@ -169,12 +164,9 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") - if is_torch_greater_than_1_6: - self.register_buffer( - "token_type_ids", - torch.zeros(self.position_ids.size(), dtype=torch.long), - persistent=False, - ) + self.register_buffer( + "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False + ) # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.forward def forward( diff --git a/src/transformers/models/ernie/modeling_ernie.py b/src/transformers/models/ernie/modeling_ernie.py index 0af6be3a707f0a..ce3f735f7d846b 100644 --- a/src/transformers/models/ernie/modeling_ernie.py +++ b/src/transformers/models/ernie/modeling_ernie.py @@ -38,12 +38,7 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import ( - apply_chunking_to_forward, - find_pruneable_heads_and_indices, - is_torch_greater_than_1_6, - prune_linear_layer, -) +from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( ModelOutput, add_code_sample_docstrings, @@ -96,12 +91,9 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - if is_torch_greater_than_1_6: - self.register_buffer( - "token_type_ids", - torch.zeros(self.position_ids.size(), dtype=torch.long), - persistent=False, - ) + self.register_buffer( + "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False + ) def forward( self, diff --git a/src/transformers/models/flaubert/modeling_flaubert.py b/src/transformers/models/flaubert/modeling_flaubert.py index 4733c5d09b855f..c165e703536c8c 100644 --- a/src/transformers/models/flaubert/modeling_flaubert.py +++ b/src/transformers/models/flaubert/modeling_flaubert.py @@ -22,7 +22,6 @@ from torch import nn from ...modeling_outputs import BaseModelOutput -from ...pytorch_utils import is_torch_greater_than_1_6 from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from ..xlm.modeling_xlm import ( XLMForMultipleChoice, @@ -139,10 +138,9 @@ def __init__(self, config): # , dico, is_encoder, with_output): super().__init__(config) self.layerdrop = getattr(config, "layerdrop", 0.0) self.pre_norm = getattr(config, "pre_norm", False) - if is_torch_greater_than_1_6: - self.register_buffer( - "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False - ) + self.register_buffer( + "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False + ) @add_start_docstrings_to_model_forward(FLAUBERT_INPUTS_DOCSTRING) @add_code_sample_docstrings( diff --git a/src/transformers/models/flava/modeling_flava.py b/src/transformers/models/flava/modeling_flava.py index 9201a987609a80..9a958a525c1b81 100644 --- a/src/transformers/models/flava/modeling_flava.py +++ b/src/transformers/models/flava/modeling_flava.py @@ -29,7 +29,6 @@ from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer -from ...pytorch_utils import is_torch_greater_than_1_6 from ...utils import ( ModelOutput, add_start_docstrings, @@ -392,12 +391,9 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - if is_torch_greater_than_1_6: - self.register_buffer( - "token_type_ids", - torch.zeros(self.position_ids.size(), dtype=torch.long), - persistent=False, - ) + self.register_buffer( + "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False + ) def forward( self, diff --git a/src/transformers/models/fnet/modeling_fnet.py b/src/transformers/models/fnet/modeling_fnet.py index e2347adce961c2..a3a7193054b9be 100755 --- a/src/transformers/models/fnet/modeling_fnet.py +++ b/src/transformers/models/fnet/modeling_fnet.py @@ -43,7 +43,7 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import apply_chunking_to_forward, is_torch_greater_than_1_6 +from ...pytorch_utils import apply_chunking_to_forward from ...utils import ( add_code_sample_docstrings, add_start_docstrings, @@ -117,12 +117,9 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - if is_torch_greater_than_1_6: - self.register_buffer( - "token_type_ids", - torch.zeros(self.position_ids.size(), dtype=torch.long), - persistent=False, - ) + self.register_buffer( + "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False + ) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: diff --git a/src/transformers/models/gpt2/modeling_gpt2.py b/src/transformers/models/gpt2/modeling_gpt2.py index 2dde68aad8666b..80bca016348e29 100644 --- a/src/transformers/models/gpt2/modeling_gpt2.py +++ b/src/transformers/models/gpt2/modeling_gpt2.py @@ -23,22 +23,9 @@ import torch import torch.utils.checkpoint from torch import nn +from torch.cuda.amp import autocast from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss -from ...pytorch_utils import ( - Conv1D, - find_pruneable_heads_and_indices, - is_torch_greater_or_equal_than_1_6, - prune_conv1d_layer, -) - - -if is_torch_greater_or_equal_than_1_6: - is_amp_available = True - from torch.cuda.amp import autocast -else: - is_amp_available = False - from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, @@ -47,6 +34,7 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel, SequenceSummary +from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer from ...utils import ( ModelOutput, add_code_sample_docstrings, @@ -247,12 +235,7 @@ def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, hea scale_factor /= float(self.layer_idx + 1) # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk)) - if is_amp_available: - with autocast(enabled=False): - q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len) - attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor) - attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len) - else: + with autocast(enabled=False): q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len) attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor) attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len) diff --git a/src/transformers/models/imagegpt/modeling_imagegpt.py b/src/transformers/models/imagegpt/modeling_imagegpt.py index e71ea4a272c2d0..88bc042b21cc62 100755 --- a/src/transformers/models/imagegpt/modeling_imagegpt.py +++ b/src/transformers/models/imagegpt/modeling_imagegpt.py @@ -22,22 +22,9 @@ import torch import torch.utils.checkpoint from torch import nn +from torch.cuda.amp import autocast from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss -from ...pytorch_utils import ( - Conv1D, - find_pruneable_heads_and_indices, - is_torch_greater_or_equal_than_1_6, - prune_conv1d_layer, -) - - -if is_torch_greater_or_equal_than_1_6: - is_amp_available = True - from torch.cuda.amp import autocast -else: - is_amp_available = False - from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, @@ -45,6 +32,7 @@ SequenceClassifierOutputWithPast, ) from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_imagegpt import ImageGPTConfig @@ -299,12 +287,7 @@ def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, hea scale_factor /= float(self.layer_idx + 1) # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk)) - if is_amp_available: - with autocast(enabled=False): - q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len) - attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor) - attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len) - else: + with autocast(enabled=False): q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len) attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor) attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len) diff --git a/src/transformers/models/mctct/modeling_mctct.py b/src/transformers/models/mctct/modeling_mctct.py index 3eb59a0c419beb..0313379510c4b7 100755 --- a/src/transformers/models/mctct/modeling_mctct.py +++ b/src/transformers/models/mctct/modeling_mctct.py @@ -33,7 +33,6 @@ find_pruneable_heads_and_indices, prune_linear_layer, ) -from ...pytorch_utils import is_torch_greater_than_1_6 from ...utils import logging from .configuration_mctct import MCTCTConfig @@ -153,12 +152,11 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - if is_torch_greater_than_1_6: - self.register_buffer( - "token_type_ids", - torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), - persistent=False, - ) + self.register_buffer( + "token_type_ids", + torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), + persistent=False, + ) def forward( self, input_features=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 diff --git a/src/transformers/models/nezha/modeling_nezha.py b/src/transformers/models/nezha/modeling_nezha.py index 4fa38b3ed48f09..b85d17444c613d 100644 --- a/src/transformers/models/nezha/modeling_nezha.py +++ b/src/transformers/models/nezha/modeling_nezha.py @@ -38,12 +38,7 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import ( - apply_chunking_to_forward, - find_pruneable_heads_and_indices, - is_torch_greater_than_1_6, - prune_linear_layer, -) +from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( ModelOutput, add_code_sample_docstrings, @@ -187,12 +182,9 @@ def __init__(self, config): # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) - if is_torch_greater_than_1_6: - self.register_buffer( - "token_type_ids", - torch.zeros((1, config.max_position_embeddings), dtype=torch.long), - persistent=False, - ) + self.register_buffer( + "token_type_ids", torch.zeros((1, config.max_position_embeddings), dtype=torch.long), persistent=False + ) def forward( self, diff --git a/src/transformers/models/nystromformer/modeling_nystromformer.py b/src/transformers/models/nystromformer/modeling_nystromformer.py index e1f352d2c89798..e2a41a8616588a 100755 --- a/src/transformers/models/nystromformer/modeling_nystromformer.py +++ b/src/transformers/models/nystromformer/modeling_nystromformer.py @@ -33,12 +33,7 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import ( - apply_chunking_to_forward, - find_pruneable_heads_and_indices, - is_torch_greater_than_1_6, - prune_linear_layer, -) +from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_nystromformer import NystromformerConfig @@ -72,12 +67,11 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)) + 2) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") - if is_torch_greater_than_1_6: - self.register_buffer( - "token_type_ids", - torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), - persistent=False, - ) + self.register_buffer( + "token_type_ids", + torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), + persistent=False, + ) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: diff --git a/src/transformers/models/qdqbert/modeling_qdqbert.py b/src/transformers/models/qdqbert/modeling_qdqbert.py index 35890625b1ffbd..9e653e58e24944 100755 --- a/src/transformers/models/qdqbert/modeling_qdqbert.py +++ b/src/transformers/models/qdqbert/modeling_qdqbert.py @@ -39,7 +39,7 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import find_pruneable_heads_and_indices, is_torch_greater_than_1_6, prune_linear_layer +from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( add_code_sample_docstrings, add_start_docstrings, @@ -166,12 +166,9 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - if is_torch_greater_than_1_6: - self.register_buffer( - "token_type_ids", - torch.zeros(self.position_ids.size(), dtype=torch.long), - persistent=False, - ) + self.register_buffer( + "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False + ) def forward( self, diff --git a/src/transformers/models/realm/modeling_realm.py b/src/transformers/models/realm/modeling_realm.py index 6ee2b1fd14b402..b64a6ef72bac63 100644 --- a/src/transformers/models/realm/modeling_realm.py +++ b/src/transformers/models/realm/modeling_realm.py @@ -31,12 +31,7 @@ ModelOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import ( - apply_chunking_to_forward, - find_pruneable_heads_and_indices, - is_torch_greater_than_1_6, - prune_linear_layer, -) +from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_realm import RealmConfig @@ -185,12 +180,9 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - if is_torch_greater_than_1_6: - self.register_buffer( - "token_type_ids", - torch.zeros(self.position_ids.size(), dtype=torch.long), - persistent=False, - ) + self.register_buffer( + "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False + ) def forward( self, diff --git a/src/transformers/models/roberta/modeling_roberta.py b/src/transformers/models/roberta/modeling_roberta.py index 46add0be500195..d27fc5fa342524 100644 --- a/src/transformers/models/roberta/modeling_roberta.py +++ b/src/transformers/models/roberta/modeling_roberta.py @@ -35,12 +35,7 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import ( - apply_chunking_to_forward, - find_pruneable_heads_and_indices, - is_torch_greater_than_1_6, - prune_linear_layer, -) +from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( add_code_sample_docstrings, add_start_docstrings, @@ -87,12 +82,9 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - if is_torch_greater_than_1_6: - self.register_buffer( - "token_type_ids", - torch.zeros(self.position_ids.size(), dtype=torch.long), - persistent=False, - ) + self.register_buffer( + "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False + ) # End copy self.padding_idx = config.pad_token_id diff --git a/src/transformers/models/vilt/modeling_vilt.py b/src/transformers/models/vilt/modeling_vilt.py index dab78c0bce8687..eefa8f641ff187 100755 --- a/src/transformers/models/vilt/modeling_vilt.py +++ b/src/transformers/models/vilt/modeling_vilt.py @@ -34,12 +34,7 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import ( - find_pruneable_heads_and_indices, - is_torch_greater_or_equal_than_1_10, - is_torch_greater_than_1_6, - prune_linear_layer, -) +from ...pytorch_utils import find_pruneable_heads_and_indices, is_torch_greater_or_equal_than_1_10, prune_linear_layer from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_vilt import ViltConfig @@ -255,12 +250,9 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - if is_torch_greater_than_1_6: - self.register_buffer( - "token_type_ids", - torch.zeros(self.position_ids.size(), dtype=torch.long), - persistent=False, - ) + self.register_buffer( + "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False + ) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: diff --git a/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py b/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py index e0634d9a6ae668..75e4e72fa4b54f 100644 --- a/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py +++ b/src/transformers/models/xlm_roberta_xl/modeling_xlm_roberta_xl.py @@ -34,12 +34,7 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import ( - apply_chunking_to_forward, - find_pruneable_heads_and_indices, - is_torch_greater_than_1_6, - prune_linear_layer, -) +from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( add_code_sample_docstrings, add_start_docstrings, @@ -80,12 +75,9 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) - if is_torch_greater_than_1_6: - self.register_buffer( - "token_type_ids", - torch.zeros(self.position_ids.size(), dtype=torch.long), - persistent=False, - ) + self.register_buffer( + "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False + ) # End copy self.padding_idx = config.pad_token_id diff --git a/src/transformers/models/yoso/modeling_yoso.py b/src/transformers/models/yoso/modeling_yoso.py index 085d46bdfb5504..682f04d06dd88c 100644 --- a/src/transformers/models/yoso/modeling_yoso.py +++ b/src/transformers/models/yoso/modeling_yoso.py @@ -34,12 +34,7 @@ TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel -from ...pytorch_utils import ( - apply_chunking_to_forward, - find_pruneable_heads_and_indices, - is_torch_greater_than_1_6, - prune_linear_layer, -) +from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_yoso import YosoConfig @@ -261,12 +256,11 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)) + 2) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") - if is_torch_greater_than_1_6: - self.register_buffer( - "token_type_ids", - torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), - persistent=False, - ) + self.register_buffer( + "token_type_ids", + torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), + persistent=False, + ) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: diff --git a/src/transformers/pytorch_utils.py b/src/transformers/pytorch_utils.py index 571a5d7d3c941b..d94e049b5e8aa2 100644 --- a/src/transformers/pytorch_utils.py +++ b/src/transformers/pytorch_utils.py @@ -26,8 +26,7 @@ logger = logging.get_logger(__name__) parsed_torch_version_base = version.parse(version.parse(torch.__version__).base_version) -is_torch_greater_or_equal_than_1_6 = parsed_torch_version_base >= version.parse("1.6.0") -is_torch_greater_than_1_6 = parsed_torch_version_base > version.parse("1.6.0") + is_torch_less_than_1_8 = parsed_torch_version_base < version.parse("1.8.0") is_torch_greater_or_equal_than_1_10 = parsed_torch_version_base >= version.parse("1.10") is_torch_less_than_1_11 = parsed_torch_version_base < version.parse("1.11") diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index c1cc3f92acb4d5..27e44ea0ba0bd4 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -71,12 +71,7 @@ from .modeling_utils import PreTrainedModel, load_sharded_checkpoint, unwrap_model from .models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_MAPPING_NAMES from .optimization import Adafactor, get_scheduler -from .pytorch_utils import ( - ALL_LAYERNORM_LAYERS, - is_torch_greater_or_equal_than_1_6, - is_torch_greater_or_equal_than_1_10, - is_torch_less_than_1_11, -) +from .pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_10, is_torch_less_than_1_11 from .tokenization_utils_base import PreTrainedTokenizerBase from .trainer_callback import ( CallbackHandler, @@ -155,9 +150,7 @@ from .utils.generic import ContextManagers -_is_torch_generator_available = False -_is_native_cuda_amp_available = False -_is_native_cpu_amp_available = False +_is_native_cpu_amp_available = is_torch_greater_or_equal_than_1_10 DEFAULT_CALLBACKS = [DefaultFlowCallback] DEFAULT_PROGRESS_CALLBACK = ProgressCallback @@ -170,13 +163,6 @@ if is_apex_available(): from apex import amp -if is_torch_greater_or_equal_than_1_6: - _is_torch_generator_available = True - _is_native_cuda_amp_available = True - -if is_torch_greater_or_equal_than_1_10: - _is_native_cpu_amp_available = True - if is_datasets_available(): import datasets @@ -565,12 +551,7 @@ def __init__( else: raise ValueError("Tried to use cpu amp but native cpu amp is not available") else: - if _is_native_cuda_amp_available: - args.half_precision_backend = "cuda_amp" - elif args.bf16: - raise ValueError("Tried to use `bf16` but native amp is not available") - else: - args.half_precision_backend = "apex" + args.half_precision_backend = "cuda_amp" logger.info(f"Using {args.half_precision_backend} half precision backend") @@ -781,7 +762,7 @@ def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]: return None generator = None - if self.args.world_size <= 1 and _is_torch_generator_available: + if self.args.world_size <= 1: generator = torch.Generator() # for backwards compatibility, we generate a seed here (which is sampled from a generator seeded with # `args.seed`) if data_seed isn't provided. @@ -826,9 +807,7 @@ def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]: else: if self.args.world_size <= 1: - if _is_torch_generator_available: - return RandomSampler(self.train_dataset, generator=generator) - return RandomSampler(self.train_dataset) + return RandomSampler(self.train_dataset, generator=generator) elif ( self.args.parallel_mode in [ParallelMode.TPU, ParallelMode.SAGEMAKER_MODEL_PARALLEL] and not self.args.dataloader_drop_last diff --git a/src/transformers/trainer_pt_utils.py b/src/transformers/trainer_pt_utils.py index 7baa7a46e95932..7ff0eb51a84e16 100644 --- a/src/transformers/trainer_pt_utils.py +++ b/src/transformers/trainer_pt_utils.py @@ -31,7 +31,6 @@ import numpy as np import torch import torch.distributed as dist -from packaging import version from torch import nn from torch.utils.data import Dataset, IterableDataset, RandomSampler, Sampler from torch.utils.data.distributed import DistributedSampler @@ -831,12 +830,7 @@ def _get_learning_rate(self): else: raise else: - last_lr = ( - # backward compatibility for pytorch schedulers - self.lr_scheduler.get_last_lr()[0] - if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.4") - else self.lr_scheduler.get_lr()[0] - ) + last_lr = self.lr_scheduler.get_last_lr()[0] if torch.is_tensor(last_lr): last_lr = last_lr.item() return last_lr diff --git a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py index cbe8153c0ec70a..3bd3b1894accb5 100755 --- a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py +++ b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_{{cookiecutter.lowercase_modelname}}.py @@ -47,7 +47,6 @@ apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, - is_torch_greater_than_1_6, ) from ...utils import logging from .configuration_{{cookiecutter.lowercase_modelname}} import {{cookiecutter.camelcase_modelname}}Config @@ -157,12 +156,11 @@ def __init__(self, config): # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") - if is_torch_greater_than_1_6: - self.register_buffer( - "token_type_ids", - torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), - persistent=False, - ) + self.register_buffer( + "token_type_ids", + torch.zeros(self.position_ids.size(), dtype=torch.long, device=self.position_ids.device), + persistent=False, + ) def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 From 6f8f2f6a77457f993a582bde2bff92af863a2d06 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Wed, 14 Sep 2022 07:36:12 -0400 Subject: [PATCH 284/539] Make AutoProcessor a magic loading class for all modalities (#18963) * Make AutoProcessor a magic loading class for all modalities * Quality --- .../models/auto/processing_auto.py | 24 +++++++++++++++---- tests/models/auto/test_processor_auto.py | 8 +++++++ 2 files changed, 28 insertions(+), 4 deletions(-) diff --git a/src/transformers/models/auto/processing_auto.py b/src/transformers/models/auto/processing_auto.py index 7eff84c5d56714..07b2811a16481b 100644 --- a/src/transformers/models/auto/processing_auto.py +++ b/src/transformers/models/auto/processing_auto.py @@ -23,7 +23,7 @@ from ...dynamic_module_utils import get_class_from_dynamic_module from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils import TOKENIZER_CONFIG_FILE -from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging +from ...utils import FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, @@ -31,6 +31,8 @@ model_type_to_module_name, replace_list_option_in_docstrings, ) +from .feature_extraction_auto import AutoFeatureExtractor +from .tokenization_auto import AutoTokenizer logger = logging.get_logger(__name__) @@ -250,10 +252,24 @@ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): if type(config) in PROCESSOR_MAPPING: return PROCESSOR_MAPPING[type(config)].from_pretrained(pretrained_model_name_or_path, **kwargs) + # At this stage, there doesn't seem to be a `Processor` class available for this model, so let's try a + # tokenizer. + try: + return AutoTokenizer.from_pretrained( + pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs + ) + except Exception: + try: + return AutoFeatureExtractor.from_pretrained( + pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs + ) + except Exception: + pass + raise ValueError( - f"Unrecognized processor in {pretrained_model_name_or_path}. Should have a `processor_type` key in " - f"its {FEATURE_EXTRACTOR_NAME}, or one of the following `model_type` keys in its {CONFIG_NAME}: " - f"{', '.join(c for c in PROCESSOR_MAPPING_NAMES.keys())}" + f"Unrecognized processing class in {pretrained_model_name_or_path}. Can't instantiate a processor, a " + "tokenizer or a feature extractor for this model. Make sure the repository contains the files of at least " + "one of those processing classes." ) @staticmethod diff --git a/tests/models/auto/test_processor_auto.py b/tests/models/auto/test_processor_auto.py index 2f99d5c379bc3b..fe57078a6170c8 100644 --- a/tests/models/auto/test_processor_auto.py +++ b/tests/models/auto/test_processor_auto.py @@ -202,6 +202,14 @@ def test_new_processor_registration(self): if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] + def test_auto_processor_creates_tokenizer(self): + processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert") + self.assertEqual(processor.__class__.__name__, "BertTokenizerFast") + + def test_auto_processor_creates_feature_extractor(self): + processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext") + self.assertEqual(processor.__class__.__name__, "ConvNextFeatureExtractor") + @is_staging_test class ProcessorPushToHubTester(unittest.TestCase): From fc21c9be62483d06adae6239ebe6ca77c2cb6269 Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Wed, 14 Sep 2022 13:52:54 +0200 Subject: [PATCH 285/539] [CookieCutter] Clarify questions (#18959) * Clarify cookiecutter questions * Update first question Co-authored-by: Niels Rogge --- .../commands/add_new_model_like.py | 36 +++++++++++-------- 1 file changed, 21 insertions(+), 15 deletions(-) diff --git a/src/transformers/commands/add_new_model_like.py b/src/transformers/commands/add_new_model_like.py index c49f3ad8690406..a5d4e97ffd7120 100644 --- a/src/transformers/commands/add_new_model_like.py +++ b/src/transformers/commands/add_new_model_like.py @@ -1442,7 +1442,9 @@ def get_user_input(): # Get old model type valid_model_type = False while not valid_model_type: - old_model_type = input("What is the model you would like to duplicate? ") + old_model_type = input( + "What is the model you would like to duplicate? Please provide the lowercase `model_type` (e.g. roberta): " + ) if old_model_type in model_types: valid_model_type = True else: @@ -1465,38 +1467,42 @@ def get_user_input(): "We couldn't find the name of the base checkpoint for that model, please enter it here." ) - model_name = get_user_field("What is the name for your new model?") + model_name = get_user_field( + "What is the name (with no special casing) for your new model in the paper (e.g. RoBERTa)? " + ) default_patterns = ModelPatterns(model_name, model_name) model_type = get_user_field( - "What identifier would you like to use for the model type of this model?", + "What identifier would you like to use for the `model_type` of this model? ", default_value=default_patterns.model_type, ) model_lower_cased = get_user_field( - "What name would you like to use for the module of this model?", + "What lowercase name would you like to use for the module (folder) of this model? ", default_value=default_patterns.model_lower_cased, ) model_camel_cased = get_user_field( - "What prefix (camel-cased) would you like to use for the model classes of this model?", + "What prefix (camel-cased) would you like to use for the model classes of this model (e.g. Roberta)? ", default_value=default_patterns.model_camel_cased, ) model_upper_cased = get_user_field( - "What prefix (upper-cased) would you like to use for the constants relative to this model?", + "What prefix (upper-cased) would you like to use for the constants relative to this model? ", default_value=default_patterns.model_upper_cased, ) config_class = get_user_field( - "What will be the name of the config class for this model?", default_value=f"{model_camel_cased}Config" + "What will be the name of the config class for this model? ", default_value=f"{model_camel_cased}Config" + ) + checkpoint = get_user_field( + "Please give a checkpoint identifier (on the model Hub) for this new model (e.g. facebook/roberta-base): " ) - checkpoint = get_user_field("Please give a checkpoint identifier (on the model Hub) for this new model.") old_processing_classes = [ c for c in [old_feature_extractor_class, old_tokenizer_class, old_processor_class] if c is not None ] old_processing_classes = ", ".join(old_processing_classes) keep_processing = get_user_field( - f"Will your new model use the same processing class as {old_model_type} ({old_processing_classes})?", + f"Will your new model use the same processing class as {old_model_type} ({old_processing_classes}) (yes/no)? ", convert_to=convert_to_bool, - fallback_message="Please answer yes/no, y/n, true/false or 1/0.", + fallback_message="Please answer yes/no, y/n, true/false or 1/0. ", ) if keep_processing: feature_extractor_class = old_feature_extractor_class @@ -1505,21 +1511,21 @@ def get_user_input(): else: if old_tokenizer_class is not None: tokenizer_class = get_user_field( - "What will be the name of the tokenizer class for this model?", + "What will be the name of the tokenizer class for this model? ", default_value=f"{model_camel_cased}Tokenizer", ) else: tokenizer_class = None if old_feature_extractor_class is not None: feature_extractor_class = get_user_field( - "What will be the name of the feature extractor class for this model?", + "What will be the name of the feature extractor class for this model? ", default_value=f"{model_camel_cased}FeatureExtractor", ) else: feature_extractor_class = None if old_processor_class is not None: processor_class = get_user_field( - "What will be the name of the processor class for this model?", + "What will be the name of the processor class for this model? ", default_value=f"{model_camel_cased}Processor", ) else: @@ -1539,7 +1545,7 @@ def get_user_input(): ) add_copied_from = get_user_field( - "Should we add # Copied from statements when creating the new modeling file?", + "Should we add # Copied from statements when creating the new modeling file (yes/no)? ", convert_to=convert_to_bool, default_value="yes", fallback_message="Please answer yes/no, y/n, true/false or 1/0.", @@ -1547,7 +1553,7 @@ def get_user_input(): all_frameworks = get_user_field( "Should we add a version of your new model in all the frameworks implemented by" - f" {old_model_type} ({old_frameworks})?", + f" {old_model_type} ({old_frameworks}) (yes/no)? ", convert_to=convert_to_bool, default_value="yes", fallback_message="Please answer yes/no, y/n, true/false or 1/0.", From 77ea35b93aa72efb89498a4d4c10a0b793fcc2a1 Mon Sep 17 00:00:00 2001 From: Partho Date: Wed, 14 Sep 2022 17:28:05 +0530 Subject: [PATCH 286/539] added type hints (#19015) --- src/transformers/models/fsmt/modeling_fsmt.py | 36 +++++++++---------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/src/transformers/models/fsmt/modeling_fsmt.py b/src/transformers/models/fsmt/modeling_fsmt.py index 8270a70e9ee927..db53b888f9ba60 100644 --- a/src/transformers/models/fsmt/modeling_fsmt.py +++ b/src/transformers/models/fsmt/modeling_fsmt.py @@ -468,12 +468,12 @@ def __init__(self, config: FSMTConfig, embed_tokens): def forward( self, - input_ids, - attention_mask=None, - head_mask=None, - output_attentions=False, - output_hidden_states=False, - return_dict=True, + input_ids: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, ): """ Args: @@ -669,18 +669,18 @@ def __init__(self, config: FSMTConfig, embed_tokens: nn.Embedding): def forward( self, - input_ids, - encoder_hidden_states, - encoder_padding_mask, - decoder_padding_mask, - decoder_causal_mask, - head_mask=None, - cross_attn_head_mask=None, - past_key_values=None, - use_cache=False, - output_attentions=False, - output_hidden_states=False, - return_dict=True, + input_ids: torch.Tensor, + encoder_hidden_states: torch.Tensor, + encoder_padding_mask: torch.Tensor, + decoder_padding_mask: torch.Tensor, + decoder_causal_mask: torch.Tensor, + head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + use_cache: bool = False, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, ): """ Includes several features from "Jointly Learning to Align and Translate with Transformer Models" (Garg et al., From 77b18783c2d1881c4f50bc98d877826d26342429 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Wed, 14 Sep 2022 14:45:00 +0200 Subject: [PATCH 287/539] Fix CI for `PegasusX` (#19025) * Skip test_torchscript_output_attentions for PegasusXModelTest * fix test_inference_no_head * fix test_inference_head * fix test_seq_to_seq_generation Co-authored-by: ydshieh --- .../pegasus_x/test_modeling_pegasus_x.py | 20 +++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/tests/models/pegasus_x/test_modeling_pegasus_x.py b/tests/models/pegasus_x/test_modeling_pegasus_x.py index 2fade61ff76176..14ce6919f688a4 100644 --- a/tests/models/pegasus_x/test_modeling_pegasus_x.py +++ b/tests/models/pegasus_x/test_modeling_pegasus_x.py @@ -206,6 +206,12 @@ def setUp(self): self.model_tester = PegasusXModelTester(self) self.config_tester = ConfigTester(self, config_class=PegasusXConfig) + @unittest.skip( + "`PegasusXGlobalLocalAttention` returns attentions as dictionary - not compatible with torchscript " + ) + def test_torchscript_output_attentions(self): + pass + def test_config(self): self.config_tester.run_common_tests() @@ -565,12 +571,13 @@ def test_inference_no_head(self): inputs_dict = prepare_pegasus_x_inputs_dict(model.config, input_ids, decoder_input_ids) with torch.no_grad(): output = model(**inputs_dict)[0] - expected_shape = torch.Size((1, 11, 1024)) + expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) # change to expected output here expected_slice = torch.tensor( - [[0.7144, 0.8143, -1.2813], [0.7144, 0.8143, -1.2813], [-0.0467, 2.5911, -2.1845]], device=torch_device + [[0.0702, -0.1552, 0.1192], [0.0836, -0.1848, 0.1304], [0.0673, -0.1686, 0.1045]], device=torch_device ) + self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE)) def test_inference_head(self): @@ -586,13 +593,13 @@ def test_inference_head(self): self.assertEqual(output.shape, expected_shape) # change to expected output here expected_slice = torch.tensor( - [[0.7144, 0.8143, -1.2813], [0.7144, 0.8143, -1.2813], [-0.0467, 2.5911, -2.1845]], device=torch_device + [[0.0, 9.5705185, 1.5897303], [0.0, 9.833374, 1.5828674], [0.0, 10.429961, 1.5643371]], device=torch_device ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE)) def test_seq_to_seq_generation(self): - hf = PegasusXForConditionalGeneration.from_pretrained("google/pegasus-x-base").to(torch_device) - tok = PegasusTokenizer.from_pretrained("google/pegasus-x-large") + hf = PegasusXForConditionalGeneration.from_pretrained("google/pegasus-x-base-arxiv").to(torch_device) + tok = PegasusTokenizer.from_pretrained("google/pegasus-x-base") batch_input = [ "While large pretrained Transformer models have proven highly capable at tackling natural language tasks," @@ -626,7 +633,8 @@ def test_seq_to_seq_generation(self): ) EXPECTED = [ - "we investigate the performance of a new pretrained model for long input summarization. the model" + "we investigate the performance of a new pretrained model for long input summarization. the model is a" + " superposition of two well -" ] generated = tok.batch_decode( From 0b567aa430e9b1b56254de4775db5868104d34e5 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Wed, 14 Sep 2022 09:25:15 -0400 Subject: [PATCH 288/539] Add Document QA pipeline metadata (#19028) --- utils/update_metadata.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/utils/update_metadata.py b/utils/update_metadata.py index 945740f02ad695..7eb0294cda9e33 100644 --- a/utils/update_metadata.py +++ b/utils/update_metadata.py @@ -80,6 +80,11 @@ "AutoModelForAudioFrameClassification", ), ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"), + ( + "document-question-answering", + "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES", + "AutoModelForDocumentQuestionAnswering", + ), ] From e1224a2a0f42c2d00f5b9d93d4d5dd43a628b574 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger Date: Wed, 14 Sep 2022 10:01:22 -0400 Subject: [PATCH 289/539] Making save_load test slow as it times out --- tests/models/vit_mae/test_modeling_tf_vit_mae.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/models/vit_mae/test_modeling_tf_vit_mae.py b/tests/models/vit_mae/test_modeling_tf_vit_mae.py index 906c79e766f425..99f851d2a9d21b 100644 --- a/tests/models/vit_mae/test_modeling_tf_vit_mae.py +++ b/tests/models/vit_mae/test_modeling_tf_vit_mae.py @@ -328,6 +328,7 @@ def test_compile_tf_model(self): # overwrite from common since TFViTMAEForPretraining has random masking, we need to fix the noise # to generate masks during test + @slow() def test_keras_save_load(self): # make mask reproducible np.random.seed(2) From 1207deb80677f7c9b560ba165f264cc9b287988b Mon Sep 17 00:00:00 2001 From: Sylvain Gugger Date: Wed, 14 Sep 2022 10:02:14 -0400 Subject: [PATCH 290/539] Typo fix --- tests/models/vit_mae/test_modeling_tf_vit_mae.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/models/vit_mae/test_modeling_tf_vit_mae.py b/tests/models/vit_mae/test_modeling_tf_vit_mae.py index 99f851d2a9d21b..e0613b3b4b3c58 100644 --- a/tests/models/vit_mae/test_modeling_tf_vit_mae.py +++ b/tests/models/vit_mae/test_modeling_tf_vit_mae.py @@ -328,7 +328,7 @@ def test_compile_tf_model(self): # overwrite from common since TFViTMAEForPretraining has random masking, we need to fix the noise # to generate masks during test - @slow() + @slow def test_keras_save_load(self): # make mask reproducible np.random.seed(2) From 6a9726ec0e3b8d3841441d911fe37a0538db4d3a Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Wed, 14 Sep 2022 16:13:20 +0200 Subject: [PATCH 291/539] Fix `DocumentQuestionAnsweringPipelineTests` (#19023) * Fix DocumentQuestionAnsweringPipelineTests Co-authored-by: ydshieh --- ...t_pipelines_document_question_answering.py | 37 ++++++++----------- 1 file changed, 16 insertions(+), 21 deletions(-) diff --git a/tests/pipelines/test_pipelines_document_question_answering.py b/tests/pipelines/test_pipelines_document_question_answering.py index 7bf8ec99fb5922..091f6c3c03b14a 100644 --- a/tests/pipelines/test_pipelines_document_question_answering.py +++ b/tests/pipelines/test_pipelines_document_question_answering.py @@ -113,13 +113,8 @@ def test_small_model_pt(self): question = "How many cats are there?" expected_output = [ - { - "score": 0.0001, - "answer": "2312/2019 DUE DATE 26102/2019 ay DESCRIPTION UNIT PRICE", - "start": 38, - "end": 45, - }, - {"score": 0.0001, "answer": "2312/2019 DUE", "start": 38, "end": 39}, + {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39}, + {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40}, ] outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual(nested_simplify(outputs, decimals=4), expected_output) @@ -170,8 +165,8 @@ def test_large_model_pt(self): self.assertEqual( nested_simplify(outputs, decimals=4), [ - {"score": 0.9966, "answer": "us-001", "start": 15, "end": 15}, - {"score": 0.0009, "answer": "us-001", "start": 15, "end": 15}, + {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16}, + {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16}, ], ) @@ -179,8 +174,8 @@ def test_large_model_pt(self): self.assertEqual( nested_simplify(outputs, decimals=4), [ - {"score": 0.9966, "answer": "us-001", "start": 15, "end": 15}, - {"score": 0.0009, "answer": "us-001", "start": 15, "end": 15}, + {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16}, + {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16}, ], ) @@ -191,8 +186,8 @@ def test_large_model_pt(self): nested_simplify(outputs, decimals=4), [ [ - {"score": 0.9966, "answer": "us-001", "start": 15, "end": 15}, - {"score": 0.0009, "answer": "us-001", "start": 15, "end": 15}, + {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16}, + {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16}, ], ] * 2, @@ -219,8 +214,8 @@ def test_large_model_pt_layoutlm(self): self.assertEqual( nested_simplify(outputs, decimals=4), [ - {"score": 0.9998, "answer": "us-001", "start": 15, "end": 15}, - {"score": 0.0, "answer": "INVOICE # us-001", "start": 13, "end": 15}, + {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16}, + {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23}, ], ) @@ -228,8 +223,8 @@ def test_large_model_pt_layoutlm(self): self.assertEqual( nested_simplify(outputs, decimals=4), [ - {"score": 0.9998, "answer": "us-001", "start": 15, "end": 15}, - {"score": 0.0, "answer": "INVOICE # us-001", "start": 13, "end": 15}, + {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16}, + {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23}, ], ) @@ -240,8 +235,8 @@ def test_large_model_pt_layoutlm(self): nested_simplify(outputs, decimals=4), [ [ - {"score": 0.9998, "answer": "us-001", "start": 15, "end": 15}, - {"score": 0.0, "answer": "INVOICE # us-001", "start": 13, "end": 15}, + {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16}, + {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23}, ] ] * 2, @@ -254,8 +249,8 @@ def test_large_model_pt_layoutlm(self): self.assertEqual( nested_simplify(outputs, decimals=4), [ - {"score": 0.9998, "answer": "us-001", "start": 15, "end": 15}, - {"score": 0.0, "answer": "INVOICE # us-001", "start": 13, "end": 15}, + {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16}, + {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23}, ], ) From f5f430e5c80b85b57bb910435e45d84746210133 Mon Sep 17 00:00:00 2001 From: Shinya Otani <67080255+SO0529@users.noreply.github.com> Date: Wed, 14 Sep 2022 23:17:40 +0900 Subject: [PATCH 292/539] Add support for Japanese GPT-NeoX-based model by ABEJA, Inc. (#18814) * add gpt-neox-japanese model and tokenizer as new model * Correction to PR's comment for GPT NeoX Japanese - Fix to be able to use gpu - Add comment # Copied... at the top of RotaryEmbedding - Implement nn.Linear instead of original linear class - Add generation test under @slow * fix bias treatment for gpt-neox-japanese * Modidy gpt-neox-japanese following PR - add doc for bias_dropout_add - style change following a PR comment * add document for gpt-neox-japanese * remove unused import from gpt-neox-japanese * fix README for gpt-neox-japanese --- README.md | 1 + README_ko.md | 1 + README_zh-hans.md | 1 + README_zh-hant.md | 1 + docs/source/en/_toctree.yml | 2 + docs/source/en/index.mdx | 2 + .../source/en/model_doc/gpt_neox_japanese.mdx | 66 ++ src/transformers/__init__.py | 20 + src/transformers/models/__init__.py | 1 + .../models/auto/configuration_auto.py | 3 + src/transformers/models/auto/modeling_auto.py | 3 + .../models/auto/tokenization_auto.py | 1 + .../models/gpt_neox_japanese/__init__.py | 66 ++ .../configuration_gpt_neox_japanese.py | 124 +++ .../modeling_gpt_neox_japanese.py | 724 ++++++++++++++++++ .../tokenization_gpt_neox_japanese.py | 379 +++++++++ src/transformers/utils/dummy_pt_objects.py | 31 + .../utils/dummy_tokenizers_objects.py | 7 + tests/models/gpt_neox_japanese/__init__.py | 0 .../test_modeling_gpt_neox_japanese.py | 255 ++++++ .../test_tokenization_gpt_neox_japanese.py | 137 ++++ 21 files changed, 1825 insertions(+) create mode 100644 docs/source/en/model_doc/gpt_neox_japanese.mdx create mode 100644 src/transformers/models/gpt_neox_japanese/__init__.py create mode 100644 src/transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py create mode 100755 src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py create mode 100644 src/transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py create mode 100644 tests/models/gpt_neox_japanese/__init__.py create mode 100644 tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py create mode 100644 tests/models/gpt_neox_japanese/test_tokenization_gpt_neox_japanese.py diff --git a/README.md b/README.md index 570f12ac44e06a..abadb9f57406f1 100644 --- a/README.md +++ b/README.md @@ -305,6 +305,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever. 1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy. 1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach +1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/main/model_doc/gpt_neox_japanese)** (from ABEJA) released by Shinya Otani, Takayoshi Makabe, Anuj Arora, and Kyo Hattori. 1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**. 1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (from EleutherAI) released in the repository [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki. 1. **[GroupViT](https://huggingface.co/docs/transformers/model_doc/groupvit)** (from UCSD, NVIDIA) released with the paper [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) by Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang. diff --git a/README_ko.md b/README_ko.md index c6016624a861c6..2a42664e20d771 100644 --- a/README_ko.md +++ b/README_ko.md @@ -257,6 +257,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever. 1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy. 1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach +1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/main/model_doc/gpt_neox_japanese)** (from ABEJA) released by Shinya Otani, Takayoshi Makabe, Anuj Arora, and Kyo Hattori. 1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**. 1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (from EleutherAI) released in the repository [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki. 1. **[GroupViT](https://huggingface.co/docs/transformers/model_doc/groupvit)** (from UCSD, NVIDIA) released with the paper [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) by Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang. diff --git a/README_zh-hans.md b/README_zh-hans.md index f3c07bfb361d11..7d0642cc9abff3 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -281,6 +281,7 @@ conda install -c huggingface transformers 1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (来自 OpenAI) 伴随论文 [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) 由 Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever 发布。 1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (来自 EleutherAI) 随仓库 [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) 发布。作者为 Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy 发布。 1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach +1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/main/model_doc/gpt_neox_japanese)** (来自 ABEJA) 由 Shinya Otani, Takayoshi Makabe, Anuj Arora, Kyo Hattori。 1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (来自 OpenAI) 伴随论文 [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) 由 Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever** 发布。 1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (来自 EleutherAI) 伴随论文 [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) 由 Ben Wang and Aran Komatsuzaki 发布。 1. **[GroupViT](https://huggingface.co/docs/transformers/model_doc/groupvit)** (来自 UCSD, NVIDIA) 伴随论文 [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) 由 Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index 2ef861d0592358..c4de5181002d73 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -293,6 +293,7 @@ conda install -c huggingface transformers 1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever. 1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy. 1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach +1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/main/model_doc/gpt_neox_japanese)** (from ABEJA) released by Shinya Otani, Takayoshi Makabe, Anuj Arora, and Kyo Hattori. 1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**. 1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (from EleutherAI) released with the paper [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki. 1. **[GroupViT](https://huggingface.co/docs/transformers/model_doc/groupvit)** (from UCSD, NVIDIA) released with the paper [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) by Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang. diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index b59fdfbc46d91a..c21388b60a6f9f 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -253,6 +253,8 @@ title: GPT Neo - local: model_doc/gpt_neox title: GPT NeoX + - local: model_doc/gpt_neox_japanese + title: GPT NeoX Japanese - local: model_doc/gptj title: GPT-J - local: model_doc/gpt2 diff --git a/docs/source/en/index.mdx b/docs/source/en/index.mdx index 265fe39f25fdb9..f118359bc57bee 100644 --- a/docs/source/en/index.mdx +++ b/docs/source/en/index.mdx @@ -97,6 +97,7 @@ The documentation is organized into five sections: 1. **[GPT](model_doc/openai-gpt)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever. 1. **[GPT Neo](model_doc/gpt_neo)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy. 1. **[GPT NeoX](model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach +1. **[GPT NeoX Japanese](model_doc/gpt_neox_japanese)** (from ABEJA) released by Shinya Otani, Takayoshi Makabe, Anuj Arora, and Kyo Hattori. 1. **[GPT-2](model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**. 1. **[GPT-J](model_doc/gptj)** (from EleutherAI) released in the repository [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki. 1. **[GroupViT](model_doc/groupvit)** (from UCSD, NVIDIA) released with the paper [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) by Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang. @@ -242,6 +243,7 @@ Flax), PyTorch, and/or TensorFlow. | GLPN | ❌ | ❌ | ✅ | ❌ | ❌ | | GPT Neo | ❌ | ❌ | ✅ | ❌ | ✅ | | GPT NeoX | ❌ | ✅ | ✅ | ❌ | ❌ | +| GPT NeoX Japanese | ✅ | ❌ | ✅ | ❌ | ❌ | | GPT-J | ❌ | ❌ | ✅ | ✅ | ✅ | | GroupViT | ❌ | ❌ | ✅ | ❌ | ❌ | | Hubert | ❌ | ❌ | ✅ | ✅ | ❌ | diff --git a/docs/source/en/model_doc/gpt_neox_japanese.mdx b/docs/source/en/model_doc/gpt_neox_japanese.mdx new file mode 100644 index 00000000000000..da94b7497603c8 --- /dev/null +++ b/docs/source/en/model_doc/gpt_neox_japanese.mdx @@ -0,0 +1,66 @@ + + +# GPT-NeoX-Japanese + +## Overview + +We introduce GPT-NeoX-Japanese, which is an autoregressive language model for Japanese, trained on top of [https://github.com/EleutherAI/gpt-neox](https://github.com/EleutherAI/gpt-neox). +Japanese is a unique language with its large vocabulary and a combination of hiragana, katakana, and kanji writing scripts. +To address this distinct structure of the Japanese language, we use a [special sub-word tokenizer](https://github.com/tanreinama/Japanese-BPEEncoder_V2). We are very grateful to *tanreinama* for open-sourcing this incredibly helpful tokenizer. +Following the recommendations from Google's research on [PaLM](https://ai.googleblog.com/2022/04/pathways-language-model-palm-scaling-to.html), we have removed bias parameters from transformer blocks, achieving better model performance. Please refer [this article](https://medium.com/ml-abeja/training-a-better-gpt-2-93b157662ae4) in detail. + +Development of the model was led by [Shinya Otani](https://github.com/SO0529), [Takayoshi Makabe](https://github.com/spider-man-tm), [Anuj Arora](https://github.com/Anuj040), and [Kyo Hattori](https://github.com/go5paopao) from [ABEJA, Inc.](https://www.abejainc.com/). For more information on this model-building activity, please refer [here (ja)](https://tech-blog.abeja.asia/entry/abeja-gpt-project-202207). + +### Generation + +The `generate()` method can be used to generate text using GPT NeoX Japanese model. + +```python +>>> from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseTokenizer + +>>> model = GPTNeoXJapaneseForCausalLM.from_pretrained("abeja/gpt-neox-japanese-2.7b") +>>> tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b") + +>>> prompt = "人とAIが協調するためには、" + +>>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids + +>>> gen_tokens = model.generate( +... input_ids, +... do_sample=True, +... temperature=0.9, +... max_length=100, +... ) +>>> gen_text = tokenizer.batch_decode(gen_tokens, skip_special_tokens=True)[0] + +>>> print(gen_text) +人とAIが協調するためには、AIと人が共存し、AIを正しく理解する必要があります。 +``` + +## GPTNeoXJapaneseConfig + +[[autodoc]] GPTNeoXJapaneseConfig + +## GPTNeoXJapaneseTokenizer + +[[autodoc]] GPTNeoXJapaneseTokenizer + +## GPTNeoXJapaneseModel + +[[autodoc]] GPTNeoXJapaneseModel + - forward + +## GPTNeoXJapaneseForCausalLM + +[[autodoc]] GPTNeoXJapaneseForCausalLM + - forward diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 2671b37d8ebd60..c6fd12595edfa3 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -225,6 +225,7 @@ "models.gpt2": ["GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPT2Config", "GPT2Tokenizer"], "models.gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig"], "models.gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"], + "models.gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"], "models.gptj": ["GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTJConfig"], "models.groupvit": [ "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", @@ -558,6 +559,7 @@ _import_structure["models.funnel"].append("FunnelTokenizerFast") _import_structure["models.gpt2"].append("GPT2TokenizerFast") _import_structure["models.gpt_neox"].append("GPTNeoXTokenizerFast") + _import_structure["models.gpt_neox_japanese"].append("GPTNeoXJapaneseTokenizer") _import_structure["models.herbert"].append("HerbertTokenizerFast") _import_structure["models.layoutlm"].append("LayoutLMTokenizerFast") _import_structure["models.layoutlmv2"].append("LayoutLMv2TokenizerFast") @@ -1291,6 +1293,15 @@ "GPTNeoXPreTrainedModel", ] ) + _import_structure["models.gpt_neox_japanese"].extend( + [ + "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST", + "GPTNeoXJapaneseForCausalLM", + "GPTNeoXJapaneseLayer", + "GPTNeoXJapaneseModel", + "GPTNeoXJapanesePreTrainedModel", + ] + ) _import_structure["models.gptj"].extend( [ "GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST", @@ -3114,6 +3125,7 @@ from .models.gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config, GPT2Tokenizer from .models.gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig from .models.gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig + from .models.gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig from .models.gptj import GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTJConfig from .models.groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, @@ -3412,6 +3424,7 @@ from .models.funnel import FunnelTokenizerFast from .models.gpt2 import GPT2TokenizerFast from .models.gpt_neox import GPTNeoXTokenizerFast + from .models.gpt_neox_japanese import GPTNeoXJapaneseTokenizer from .models.herbert import HerbertTokenizerFast from .models.layoutlm import LayoutLMTokenizerFast from .models.layoutlmv2 import LayoutLMv2TokenizerFast @@ -4008,6 +4021,13 @@ GPTNeoXModel, GPTNeoXPreTrainedModel, ) + from .models.gpt_neox_japanese import ( + GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, + GPTNeoXJapaneseForCausalLM, + GPTNeoXJapaneseLayer, + GPTNeoXJapaneseModel, + GPTNeoXJapanesePreTrainedModel, + ) from .models.gptj import ( GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST, GPTJForCausalLM, diff --git a/src/transformers/models/__init__.py b/src/transformers/models/__init__.py index 6a206bb9684235..fbdbfd579cb9e2 100644 --- a/src/transformers/models/__init__.py +++ b/src/transformers/models/__init__.py @@ -68,6 +68,7 @@ gpt2, gpt_neo, gpt_neox, + gpt_neox_japanese, gptj, groupvit, herbert, diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index ae0e88bd4a1e04..1204e6608a768f 100644 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -72,6 +72,7 @@ ("gpt2", "GPT2Config"), ("gpt_neo", "GPTNeoConfig"), ("gpt_neox", "GPTNeoXConfig"), + ("gpt_neox_japanese", "GPTNeoXJapaneseConfig"), ("gptj", "GPTJConfig"), ("groupvit", "GroupViTConfig"), ("hubert", "HubertConfig"), @@ -201,6 +202,7 @@ ("gpt2", "GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("gpt_neo", "GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("gpt_neox", "GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("gpt_neox_japanese", "GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("gptj", "GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("groupvit", "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("hubert", "HUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), @@ -331,6 +333,7 @@ ("gpt2", "OpenAI GPT-2"), ("gpt_neo", "GPT Neo"), ("gpt_neox", "GPT NeoX"), + ("gpt_neox_japanese", "GPT NeoX Japanese"), ("gptj", "GPT-J"), ("groupvit", "GroupViT"), ("herbert", "HerBERT"), diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index 9edfae0c89be85..caa4c9d4dffd66 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -71,6 +71,7 @@ ("gpt2", "GPT2Model"), ("gpt_neo", "GPTNeoModel"), ("gpt_neox", "GPTNeoXModel"), + ("gpt_neox_japanese", "GPTNeoXJapaneseModel"), ("gptj", "GPTJModel"), ("groupvit", "GroupViTModel"), ("hubert", "HubertModel"), @@ -234,6 +235,7 @@ ("gpt2", "GPT2LMHeadModel"), ("gpt_neo", "GPTNeoForCausalLM"), ("gpt_neox", "GPTNeoXForCausalLM"), + ("gpt_neox_japanese", "GPTNeoXJapaneseForCausalLM"), ("gptj", "GPTJForCausalLM"), ("ibert", "IBertForMaskedLM"), ("layoutlm", "LayoutLMForMaskedLM"), @@ -292,6 +294,7 @@ ("gpt2", "GPT2LMHeadModel"), ("gpt_neo", "GPTNeoForCausalLM"), ("gpt_neox", "GPTNeoXForCausalLM"), + ("gpt_neox_japanese", "GPTNeoXJapaneseForCausalLM"), ("gptj", "GPTJForCausalLM"), ("marian", "MarianForCausalLM"), ("mbart", "MBartForCausalLM"), diff --git a/src/transformers/models/auto/tokenization_auto.py b/src/transformers/models/auto/tokenization_auto.py index 7aa7627cab2cb5..97e048885e1800 100644 --- a/src/transformers/models/auto/tokenization_auto.py +++ b/src/transformers/models/auto/tokenization_auto.py @@ -129,6 +129,7 @@ ("gpt2", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)), ("gpt_neo", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)), ("gpt_neox", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)), + ("gpt_neox_japanese", ("GPTNeoXJapaneseTokenizer", None)), ("gptj", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)), ("groupvit", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)), ("herbert", ("HerbertTokenizer", "HerbertTokenizerFast" if is_tokenizers_available() else None)), diff --git a/src/transformers/models/gpt_neox_japanese/__init__.py b/src/transformers/models/gpt_neox_japanese/__init__.py new file mode 100644 index 00000000000000..0d18143c0f02a3 --- /dev/null +++ b/src/transformers/models/gpt_neox_japanese/__init__.py @@ -0,0 +1,66 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ...file_utils import _LazyModule, is_torch_available +from ...utils import OptionalDependencyNotAvailable + + +_import_structure = { + "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"], + "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_gpt_neox_japanese"] = [ + "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST", + "GPTNeoXJapaneseForCausalLM", + "GPTNeoXJapaneseLayer", + "GPTNeoXJapaneseModel", + "GPTNeoXJapanesePreTrainedModel", + ] + + +if TYPE_CHECKING: + from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig + from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_gpt_neox_japanese import ( + GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST, + GPTNeoXJapaneseForCausalLM, + GPTNeoXJapaneseLayer, + GPTNeoXJapaneseModel, + GPTNeoXJapanesePreTrainedModel, + ) + + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/src/transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py b/src/transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py new file mode 100644 index 00000000000000..749a9400b961e2 --- /dev/null +++ b/src/transformers/models/gpt_neox_japanese/configuration_gpt_neox_japanese.py @@ -0,0 +1,124 @@ +# coding=utf-8 +# Copyright 2022 ABEJA, Inc. and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" GPTNeoX Japanese model configuration""" + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + +GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json", +} + + +class GPTNeoXJapaneseConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`GPTNeoXModelJapanese`]. It is used to instantiate + a GPTNeoX model according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the GPTNeoXJapanese + [abeja/gpt-neox-japanese-2.7b](https://huggingface.co/abeja/gpt-neox-japanese-2.7b) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. Default configs is set as 2.7B model + + Args: + vocab_size (`int`, *optional*, defaults to 32000): + Vocabulary size of the GPTNeoXJapanese model. Defines the number of different tokens that can be + represented by the `inputs_ids` passed when calling [`GPTNeoXJapanese`]. + hidden_size (`int`, *optional*, defaults to 2560): + Dimension of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 32): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 32): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_multiple_size (`int`, *optional*, defaults to 4): + Dimension of the "intermediate" layer in the Transformer encoder is calculated by hidden_size * + intermediate_multiple_size. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. + rotary_pct (`float`, *optional*, defaults to 1.00): + percentage of hidden dimensions to allocate to rotary embeddings + rotary_emb_base (`int`, *optional*, defaults to 10000) + base for computing rotary embeddings frequency + max_position_embeddings (`int`, *optional*, defaults to 2048): + The maximum sequence length that this model might ever be used with. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-5): + The epsilon used by the layer normalization layers. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + weight_tying (`bool`, *optional*, defaults to `True`): + Whhether or not use weight tying between input and output embedding weight + attention_dropout (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention. + hidden_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the hidden layer. + Example: + + ```python + >>> from transformers import GPTNeoXJapaneseModel, GPTNeoXJapaneseConfig + + >>> # Initializing a GPTNeoXJapanese gpt-neox-japanese-2.7b style configuration + >>> configuration = GPTNeoXJapaneseConfig() + + >>> # Initializing a model from the gpt-neox-japanese-2.7b style configuration + >>> model = GPTNeoXJapaneseModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + model_type = "gpt_neox_japanese" + + def __init__( + self, + vocab_size=32000, + hidden_size=2560, + num_hidden_layers=32, + num_attention_heads=32, + intermediate_multiple_size=4, + hidden_act="gelu", + rotary_pct=1.00, + rotary_emb_base=10000, + max_position_embeddings=2048, + initializer_range=0.02, + layer_norm_eps=1e-5, + use_cache=True, + bos_token_id=31996, + eos_token_id=31999, + weight_tying=True, + attention_dropout=0.1, + hidden_dropout=0.0, + **kwargs + ): + super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_multiple_size = intermediate_multiple_size + self.hidden_act = hidden_act + self.rotary_pct = rotary_pct + self.rotary_emb_base = rotary_emb_base + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.use_cache = use_cache + self.weight_tying = weight_tying + self.attention_dropout = attention_dropout + self.hidden_dropout = hidden_dropout diff --git a/src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py b/src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py new file mode 100755 index 00000000000000..b79ef4f1e41b79 --- /dev/null +++ b/src/transformers/models/gpt_neox_japanese/modeling_gpt_neox_japanese.py @@ -0,0 +1,724 @@ +# coding=utf-8 +# Copyright 2022 ABEJA, Inc. and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch GPTNeoX model.""" + +from typing import Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import Tensor, nn +from torch.nn import CrossEntropyLoss + +from ...activations import ACT2FN +from ...file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings +from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast +from ...modeling_utils import PreTrainedModel +from ...utils import logging +from .configuration_gpt_neox_japanese import GPTNeoXJapaneseConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "abeja/gpt-neox-japanese-2.7b" +_CONFIG_FOR_DOC = "GPTNeoXJapaneseConfig" +_TOKENIZER_FOR_DOC = "GPTNeoXJapaneseTokenizer" + +GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST = { + "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json", + # See all GPTNeoXJapanese models at https://huggingface.co/models?filter=gpt_neox_japanese +} + + +class GPTNeoXJapanesePreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = GPTNeoXJapaneseConfig + base_model_prefix = "gpt_neox_japanese" + supports_gradient_checkpointing = True + _no_split_modules = ["GPTNeoXJapaneseLayer"] + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, GPTNeoXJapaneseModel): + module.gradient_checkpointing = value + + +class GPTNeoXJapaneseAttention(nn.Module): + def __init__(self, config, use_bias=False): + super().__init__() + self.num_attention_heads = config.num_attention_heads + self.hidden_size = config.hidden_size + self.head_size = self.hidden_size // self.num_attention_heads + + self.rotary_ndims = int(self.head_size * config.rotary_pct) + self.rotary_emb = RotaryEmbedding( + self.rotary_ndims, config.max_position_embeddings, base=config.rotary_emb_base + ) + self.max_positions = config.max_position_embeddings + self.attention_dropout = nn.Dropout(config.attention_dropout) + self.norm_factor = torch.sqrt(torch.tensor(self.head_size, dtype=torch.float32)).to(torch.get_default_dtype()) + + self.query_key_value = nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=False) + self.dense = nn.Linear(config.hidden_size, config.hidden_size, bias=False) + # Activate bias if the last layer + self.use_bias = use_bias + self.dense_bias = nn.Parameter(torch.zeros(config.hidden_size)) if use_bias else None + + def forward( + self, + hidden_states, + attention_mask, + head_mask=None, + layer_past=None, + use_cache=False, + output_attentions=False, + ): + has_layer_past = layer_past is not None and layer_past[0].numel() > 0 + + # Compute QKV + # Attention heads [batch, seq_len, hidden_size] + # --> [batch, seq_len, (np * 3 * head_size)] + qkv = self.query_key_value(hidden_states) + + # [batch, seq_len, (num_heads * 3 * head_size)] + # --> [batch, seq_len, num_heads, 3 * head_size] + new_qkv_shape = qkv.size()[:-1] + (self.num_attention_heads, 3 * self.head_size) + qkv = qkv.view(*new_qkv_shape) + + # [batch, seq_len, num_attention_heads, 3 * head_size] --> 3 [batch, num_attention_heads, seq_len, head_size] + query = qkv[..., : self.head_size].permute(0, 2, 1, 3) + key = qkv[..., self.head_size : 2 * self.head_size].permute(0, 2, 1, 3) + value = qkv[..., 2 * self.head_size :].permute(0, 2, 1, 3) + + # Compute rotary embeddings on rotary_ndims + query_rot = query[..., : self.rotary_ndims] + query_pass = query[..., self.rotary_ndims :] + key_rot = key[..., : self.rotary_ndims] + key_pass = key[..., self.rotary_ndims :] + + # Compute token offset for rotary embeddings (when decoding) + seq_len = key.shape[-2] + offset = 0 + if has_layer_past: + offset = layer_past[0].shape[-2] + seq_len += offset + cos, sin = self.rotary_emb(value, seq_len=seq_len) + query, key = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, offset=offset) + query = torch.cat((query, query_pass), dim=-1) + key = torch.cat((key, key_pass), dim=-1) + + # Cache QKV values + if has_layer_past: + past_key = layer_past[0] + past_value = layer_past[1] + key = torch.cat((past_key, key), dim=-2) + value = torch.cat((past_value, value), dim=-2) + present = (key, value) if use_cache else None + + # Compute attention + attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask) + + # Reshape outputs + attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_size) + attn_output = self.dense(attn_output) + + outputs = (attn_output, present) + if output_attentions: + outputs += (attn_weights,) + + return outputs, self.dense_bias + + @classmethod + def _split_heads(cls, tensor, num_attention_heads, attn_head_size): + """ + Splits hidden dim into attn_head_size and num_attention_heads + """ + # tensor: [bs, seq_len, hidden_size] + new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size) + # -> [bs, seq_len, num_attention_heads, attn_head_size] + tensor = tensor.view(new_shape) + # -> [bs, num_attention_heads, seq_len, attn_head_size] + tensor = tensor.permute(0, 2, 1, 3) + return tensor + + @classmethod + def _merge_heads(cls, tensor, num_attention_heads, attn_head_size): + """ + Merges attn_head_size dim and num_attn_heads dim into hidden dim + """ + # tensor [bs, num_attention_heads, seq_len, attn_head_size] + tensor = tensor.permute(0, 2, 1, 3).contiguous() + # -> [bs, seq_len, num_attention_heads, attn_head_size] + tensor = tensor.view(tensor.size(0), tensor.size(1), num_attention_heads * attn_head_size) + # -> [bs, seq_len, hidden_size] + return tensor + + def _create_casual_mask(self, key_length, query_length): + casual_mask = torch.tril( + torch.ones((self.max_positions, self.max_positions), dtype=torch.uint8).view( + 1, 1, self.max_positions, self.max_positions + ) + ) + return casual_mask[:, :, key_length - query_length : key_length, :key_length].bool() + + def _attn(self, query, key, value, attention_mask=None, head_mask=None): + # q, k, v: [bs, num_attention_heads, seq_len, attn_head_size] + # compute causal mask from causal mask buffer + batch_size, num_attention_heads, query_length, attn_head_size = query.size() + key_length = key.size(-2) + + causal_mask = self._create_casual_mask(key_length, query_length) + + query = query.view(batch_size * num_attention_heads, query_length, attn_head_size) + key = key.view(batch_size * num_attention_heads, key_length, attn_head_size) + attn_scores = torch.zeros( + batch_size * num_attention_heads, + query_length, + key_length, + dtype=query.dtype, + device=key.device, + ) + attn_scores = torch.baddbmm( + attn_scores, + query, + key.transpose(1, 2), + beta=1.0, + alpha=(torch.tensor(1.0, dtype=self.norm_factor.dtype, device=self.norm_factor.device) / self.norm_factor), + ) + attn_scores = attn_scores.view(batch_size, num_attention_heads, query_length, key_length) + + mask_value = torch.finfo(attn_scores.dtype).min + # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`. + # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device` + mask_value = torch.tensor(mask_value, dtype=attn_scores.dtype).to(attn_scores.device) + causal_mask = causal_mask.to(attn_scores.device) + attn_scores = torch.where(causal_mask, attn_scores, mask_value) + + if attention_mask is not None: + # Apply the attention mask + attn_scores = attn_scores + attention_mask + + attn_weights = nn.functional.softmax(attn_scores, dim=-1) + attn_weights = self.attention_dropout(attn_weights) + attn_weights = attn_weights.to(value.dtype) + + # Mask heads if we want to + if head_mask is not None: + attn_weights = attn_weights * head_mask + + attn_output = torch.matmul(attn_weights, value) + return attn_output, attn_weights + + +# Copied from transformers.models.gpt_neox.modeling_gpt_neox.RotaryEmbedding +class RotaryEmbedding(torch.nn.Module): + def __init__(self, dim, max_position_embeddings, base=10000, device=None): + super().__init__() + inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim)) + self.register_buffer("inv_freq", inv_freq) + + # Build here to make `torch.jit.trace` work. + self.max_seq_len_cached = max_position_embeddings + t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype) + freqs = torch.einsum("i,j->ij", t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.cos_cached = emb.cos()[None, None, :, :] + self.sin_cached = emb.sin()[None, None, :, :] + + def forward(self, x, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case. + if seq_len > self.max_seq_len_cached: + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype) + freqs = torch.einsum("i,j->ij", t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1).to(x.device) + self.cos_cached = emb.cos()[None, None, :, :] + self.sin_cached = emb.sin()[None, None, :, :] + return self.cos_cached[:seq_len, ...].to(x.device), self.sin_cached[:seq_len, ...].to(x.device) + + +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., : x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2 :] + return torch.cat((-x2, x1), dim=-1) + + +def apply_rotary_pos_emb(q, k, cos, sin, offset: int = 0): + cos = cos[..., offset : q.shape[-2] + offset, :] + sin = sin[..., offset : q.shape[-2] + offset, :] + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed, k_embed + + +def bias_dropout_add(x: Tensor, bias: Tensor, residual: Optional[Tensor], prob: float, training: bool) -> Tensor: + """add bias to x, apply dropout and residual connection + + Args: + x (Tensor): main path of output + bias (Tensor): None or attn_bias of the last attention layer + residual (Optional[Tensor]): residual value + prob (float): dropout probability + training (bool): whether in training mode or not + + Returns: + Tensor: dropout(x + bias) + residual + """ + if bias is not None: + x = x + bias + out = torch.nn.functional.dropout(x, p=prob, training=training) + if residual is not None: + out = residual + out + return out + + +class GPTNeoXJapaneseMLP(nn.Module): + def __init__(self, config): + super().__init__() + intermediate_size = int(config.hidden_size * config.intermediate_multiple_size) + self.dense_h_to_4h = nn.Linear(config.hidden_size, intermediate_size, bias=False) + # Project back to h. + self.dense_4h_to_h = nn.Linear(intermediate_size, config.hidden_size, bias=False) + self.act = ACT2FN[config.hidden_act] + + def forward(self, hidden_states): + intermediate = self.dense_h_to_4h(hidden_states) + intermediate = self.act(intermediate) + output = self.dense_4h_to_h(intermediate) + return output + + +class GPTNeoXJapaneseLayer(nn.Module): + def __init__(self, config, layer_number): + super().__init__() + self.layer_number = layer_number + self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + # activate bias only last layer + self.attention = GPTNeoXJapaneseAttention(config=config, use_bias=layer_number == config.num_hidden_layers - 1) + self.mlp = GPTNeoXJapaneseMLP(config) + self.hidden_dropout = config.hidden_dropout + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + use_cache=False, + layer_past=None, + output_attentions=False, + ): + residual = hidden_states + ln_out = self.input_layernorm(hidden_states) + attention_layer_outputs, attn_bias = self.attention( + ln_out, + attention_mask=attention_mask, + layer_past=layer_past, + head_mask=head_mask, + use_cache=use_cache, + output_attentions=output_attentions, + ) + attn_output = attention_layer_outputs[0] # output_attn: a, present, (attentions) + outputs = attention_layer_outputs[1:] + + # attn_output = (atten_output + bias) + residual + attn_output = bias_dropout_add( + attn_output, + bias=attn_bias.expand_as(residual) if attn_bias is not None else attn_bias, + residual=residual, + prob=self.hidden_dropout, + training=self.training, + ) + mlp_output = self.mlp(self.post_attention_layernorm(attn_output)) + + # attn_output = (mlp_output + mlp_bias) + atten_output + attn_output = bias_dropout_add( + mlp_output, bias=None, residual=attn_output, prob=self.hidden_dropout, training=self.training + ) + + if use_cache: + outputs = (attn_output,) + outputs + else: + outputs = (attn_output,) + outputs[1:] + + return outputs # hidden_states, present, (attentions) + + +GPT_NEOX_JAPANESE_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use + it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`~GPTNeoXJapaneseConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +GPT_NEOX_JAPANESE_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`GPTNeoXJapaneseTokenizer`]. + + attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + position_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert *input_ids* indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare GPTNeoXJapanese Model transformer outputting raw hidden-states without any specific head on top.", + GPT_NEOX_JAPANESE_START_DOCSTRING, +) +class GPTNeoXJapaneseModel(GPTNeoXJapanesePreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.config = config + + self.embed_in = nn.Embedding(config.vocab_size, config.hidden_size) + self.layers = nn.ModuleList( + [GPTNeoXJapaneseLayer(config=config, layer_number=i) for i in range(config.num_hidden_layers)] + ) + self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_in + + def set_input_embeddings(self, value): + self.embed_in = value + + @add_start_docstrings_to_model_forward(GPT_NEOX_JAPANESE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @replace_return_docstrings(output_type=BaseModelOutputWithPast, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + r""" + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + + Returns: + + Example: + + ```python + >>> from transformers import GPTNeoXJapaneseTokenizer, GPTNeoXJapaneseModel + >>> import torch + + >>> tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b") + >>> model = GPTNeoXJapaneseModel.from_pretrained("abeja/gpt-neox-japanese-2.7b") + + >>> inputs = tokenizer("日本語のGPT-neoxがHugging Faceで使えます😀", return_tensors="pt") + >>> outputs = model(**inputs) + + >>> last_hidden_states = outputs.last_hidden_state + ``` + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + use_cache = use_cache if use_cache is not None else self.config.use_cache + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + batch_size, seq_length = input_shape + + if past_key_values is None: + past_key_values = tuple([None] * self.config.num_hidden_layers) + + # Attention mask. + if attention_mask is not None: + if not batch_size > 0: + raise ValueError("batch_size has to be defined and > 0") + attention_mask = attention_mask.view(batch_size, -1) + # We create a 3D attention mask from a 2D tensor mask. + # Sizes are [batch_size, 1, 1, to_seq_length] + # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] + # this attention mask is more simple than the triangular masking of causal attention + # used in OpenAI GPT, we just need to prepare the broadcast dimension here. + attention_mask = attention_mask[:, None, None, :] + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility + attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + if inputs_embeds is None: + inputs_embeds = self.embed_in(input_ids) + + hidden_states = inputs_embeds + + presents = () if use_cache else None + all_attentions = () if output_attentions else None + all_hidden_states = () if output_hidden_states else None + for i, (layer, layer_past) in enumerate(zip(self.layers, past_key_values)): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + outputs = layer( + hidden_states, + attention_mask=attention_mask, + head_mask=head_mask[i], + layer_past=layer_past, + use_cache=use_cache, + output_attentions=output_attentions, + ) + hidden_states = outputs[0] + if use_cache is True: + presents = presents + (outputs[1],) + if output_attentions: + all_attentions = all_attentions + (outputs[2 if use_cache else 1],) + + hidden_states = self.final_layer_norm(hidden_states) + # Add last hidden state + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None) + + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=presents, + hidden_states=all_hidden_states, + attentions=all_attentions, + ) + + +@add_start_docstrings( + """GPTNeoXJapanese Model with a `language modeling` head on top for Classifier Model fine-tuning.""", + GPT_NEOX_JAPANESE_START_DOCSTRING, +) +class GPTNeoXJapaneseForCausalLM(GPTNeoXJapanesePreTrainedModel): + + _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] + + def __init__(self, config): + super().__init__(config) + self.config = config + + self.gpt_neox_japanese = GPTNeoXJapaneseModel(config) + self.embed_out = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_output_embeddings(self): + return self.embed_out + + def set_output_embeddings(self, new_embeddings): + self.embed_out = new_embeddings + + @add_start_docstrings_to_model_forward(GPT_NEOX_JAPANESE_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, CausalLMOutputWithPast]: + r""" + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional tensors are + only required when the model is used as a decoder in a Sequence to Sequence model. + + Contains pre-computed hidden-states (key and values in the self-attention blocks that can be used (see + `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in + `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are + ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + + Returns: + + Example: + + ```python + >>> from transformers import GPTNeoXJapaneseTokenizer, GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseConfig + >>> import torch + + >>> tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b") + >>> config = GPTNeoXJapaneseConfig.from_pretrained("abeja/gpt-neox-japanese-2.7b") + >>> config.is_decoder = True + >>> model = GPTNeoXJapaneseForCausalLM.from_pretrained("abeja/gpt-neox-japanese-2.7b", config=config) + + >>> inputs = tokenizer("日本語のGPT-neoxがHugging Faceで使えます😀", return_tensors="pt") + >>> outputs = model(**inputs) + + >>> prediction_logits = outputs.logits + ``` + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.gpt_neox_japanese( + input_ids, + attention_mask=attention_mask, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + lm_logits = self.embed_out(hidden_states) + + lm_loss = None + if labels is not None: + # we are doing next-token prediction; shift prediction scores and input ids by one + shift_logits = lm_logits[:, :-1, :].contiguous() + labels = labels[:, 1:].contiguous() + loss_fct = CrossEntropyLoss() + lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), labels.view(-1)) + + if not return_dict: + output = (lm_logits,) + outputs[1:] + return ((lm_loss,) + output) if lm_loss is not None else output + + return CausalLMOutputWithPast( + loss=lm_loss, + logits=lm_logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs): + input_shape = input_ids.shape + + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_shape) + + # cut decoder_input_ids if past is used + if past and past[0] is not None: + input_ids = input_ids[:, -1:] + + return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past} + + def _reorder_cache(self, past, beam_idx): + reordered_past = () + for layer_past in past: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:], + ) + return reordered_past diff --git a/src/transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py b/src/transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py new file mode 100644 index 00000000000000..a132d999a31370 --- /dev/null +++ b/src/transformers/models/gpt_neox_japanese/tokenization_gpt_neox_japanese.py @@ -0,0 +1,379 @@ +# coding=utf-8 +# Copyright 2022 ABEJA, Inc. and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tokenization classes for GPTNeoXJapanese.""" +import collections +import json +import os +import re +from typing import TYPE_CHECKING, List, Optional, Tuple + +import numpy as np + +from ...tokenization_utils_fast import PreTrainedTokenizer +from ...utils import logging + + +if TYPE_CHECKING: + from transformers.pipelines.conversational import Conversation + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"} + +PRETRAINED_VOCAB_FILES_MAP = { + "vocab_file": { + "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt", + }, + "emoji_file": { + "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json", + }, +} + +PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { + "abeja/gpt-neox-japanese-2.7b": 2048, +} + + +def load_vocab_and_emoji(vocab_file, emoji_file): + """Loads a vocabulary file and emoji file into a dictionary.""" + with open(emoji_file, "r", encoding="utf-8") as f: + emoji = json.loads(f.read()) + + vocab = collections.OrderedDict() + raw_vocab = collections.OrderedDict() + ids_to_tokens = collections.OrderedDict() + with open(vocab_file, "r", encoding="utf-8") as f: + token = f.readlines() + token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token] + for idx, b in enumerate(token): + ids_to_tokens[idx] = b + raw_vocab[",".join(b)] = idx + for wd in b: + vocab[wd] = idx + + return vocab, raw_vocab, ids_to_tokens, emoji + + +class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer): + """ + This tokenizer inherits from [`PreTrainedTokenizer`] and is based on Japanese special Sub-Word-Encoding that is + used in this repository (https://github.com/tanreinama/Japanese-BPEEncoder_V2). Check the repository for details. + Japanese has a relatively large vocabulary and there is no separation between words. Furthermore, the language is a + combination of hiragana, katakana, and kanji, and variants such as "1" and "①" are often used. In order to cope + with these, this tokenizer has the following features + - Subword-by-subword segmentation, which is intermediate between byte strings and morphological analysis. + - BPEs are created for each Kanji, Hiragana, and Katakana character, and there are no BPEs that cross character + types, such as Kanji + Hiragana or Hiragana + Katakana. + - All-byte encoding that does not require . + - Independent of UTF codes such as 2-byte and 3-byte characters + - Conversion of heterographs to the same token_id + - Emoji and Emoticon are grouped into 12 types as special tags. + + Example: + + ```python + >>> from transformers import GPTNeoXJapaneseTokenizer + + >>> tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b") + >>> # You can confirm both 慶応 and 慶應 are encoded to 17749 + >>> tokenizer("吾輩は猫である🐯。実は慶応(慶應)大学出身")["input_ids"] + [30014, 26883, 26638, 27228, 25, 26650, 31732, 31679, 27809, 26638, 17749, 31592, 17749, 31593, 321, 1281] + + >>> # Both 慶応 and 慶應 are decoded to 慶応 + >>> tokenizer.decode(tokenizer("吾輩は猫である🐯。実は慶応(慶應)大学出身")["input_ids"]) + '吾輩は猫である🐯。実は慶応(慶応)大学出身' + ``` + + Args: + vocab_file (`str`): + File containing the vocabulary. + emoji_file (`str`): + File containing the emoji. + unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The token used for padding + bos_token (`str`, *optional*, defaults to `"<|startoftext|>"`): + The beginning of sequence token. + eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The end of sequence token. + do_clean_text (`bool`, *optional*, defaults to `False`): + Whether or not to clean text for URL, EMAIL, TEL, Japanese DATE and Japanese PRICE. + """ + + vocab_files_names = VOCAB_FILES_NAMES + pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + model_input_names = ["input_ids", "attention_mask"] + + def __init__( + self, + vocab_file, + emoji_file, + unk_token="<|endoftext|>", + pad_token="<|endoftext|>", + bos_token="<|startoftext|>", + eos_token="<|endoftext|>", + do_clean_text=False, + **kwargs + ): + super().__init__( + unk_token=unk_token, + pad_token=pad_token, + bos_token=bos_token, + eos_token=eos_token, + do_clean_text=do_clean_text, + **kwargs, + ) + if not os.path.isfile(vocab_file): + raise ValueError( + f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained" + " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" + ) + if not os.path.isfile(emoji_file): + raise ValueError( + f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google" + " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" + ) + self.do_clean_text = do_clean_text + self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file) + self.subword_tokenizer = SubWordJapaneseTokenizer( + vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji + ) + + @property + def vocab_size(self): + # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab + return len(self.raw_vocab) + + def get_vocab(self): + return dict(self.raw_vocab, **self.added_tokens_encoder) + + def _tokenize(self, text): + return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text) + + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.vocab.get(token, self.vocab.get(self.unk_token)) + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.subword_tokenizer.convert_id_to_token(index) + + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + out_string = "".join(tokens).strip() + return out_string + + def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]: + """This corresponds to DialoGPT variants of models.""" + input_ids = [] + for is_user, text in conversation.iter_texts(): + input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id]) + + if len(input_ids) > self.model_max_length: + input_ids = input_ids[-self.model_max_length :] + return input_ids + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + index = 0 + if os.path.isdir(save_directory): + vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + emoji_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] + ) + else: + vocab_file = ( + (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"] + ) + emoji_file = ( + (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"] + ) + with open(vocab_file, "w", encoding="utf-8") as writer: + for token_index, token in self.ids_to_tokens.items(): + if index != token_index: + logger.warning( + f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." + " Please check that the vocabulary is not corrupted!" + ) + index = token_index + writer.write(",".join(token) + "\n") + index += 1 + with open(emoji_file, "w", encoding="utf-8") as writer: + json.dump(self.emoji, writer) + return vocab_file, emoji_file + + +class SubWordJapaneseTokenizer(object): + """ + https://github.com/tanreinama/Japanese-BPEEncoder_V2 This tokenizer class is under MIT Lisence according to the + original repository. + + MIT License + + Copyright (c) 2020 tanreinama + + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated + documentation files (the "Software"), to deal in the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all copies or substantial portions of + the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO + THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + """ + + def __init__(self, vocab, ids_to_tokens, emoji): + self.vocab = vocab # same as swe + self.ids_to_tokens = ids_to_tokens # same as bpe + self.emoji = emoji + self.maxlen = np.max([len(w) for w in self.vocab.keys()]) + self.content_repatter1 = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)") + self.content_repatter2 = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*") + self.content_repatter3 = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}") + self.content_repatter4 = re.compile( + r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" + ) + self.content_repatter5 = re.compile( + r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" + ) + self.content_repatter6 = re.compile( + r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" + ) + keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿" + blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟" + self.content_trans1 = str.maketrans({k: "" for k in keisen + blocks}) + + def __len__(self): + return len(self.ids_to_tokens) + + def clean_text(self, content): + content = self.content_repatter1.sub("", content) + content = self.content_repatter2.sub("", content) + content = self.content_repatter3.sub("", content) + content = self.content_repatter4.sub("", content) + content = self.content_repatter5.sub("", content) + content = self.content_repatter6.sub("", content) + content = content.translate(self.content_trans1) + while "" in content: + content = content.replace("", "") + return content + + def tokenize(self, text, clean=False): + text = text.replace(" ", "") + text = text.replace(" ", "") + text = text.replace("\r\n", "
") + text = text.replace("\n", "
") + text = text.replace("\r", "
") + text = text.replace("\t", "") + text = text.replace("—", "ー") + text = text.replace("−", "ー") + for k, v in self.emoji["emoji"].items(): + if k in text: + text = text.replace(k, v) + if clean: + text = self.clean_text(text) + + def check_simbol(x): + e = x.encode() + if len(x) == 1 and len(e) == 2: + c = (int(e[0]) << 8) + int(e[1]) + if ( + (c >= 0xC2A1 and c <= 0xC2BF) + or (c >= 0xC780 and c <= 0xC783) + or (c >= 0xCAB9 and c <= 0xCBBF) + or (c >= 0xCC80 and c <= 0xCDA2) + ): + return True + return False + + def checku2e(x): + e = x.encode() + if len(x) == 1 and len(e) == 3: + c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2]) + if c >= 0xE28080 and c <= 0xE2B07F: + return True + return False + + pos = 0 + result = [] + while pos < len(text): + end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3 + candidates = [] # (token_id, token, pos) + for e in range(end, pos, -1): + wd = text[pos:e] + if wd in self.vocab: + if wd[0] == "<" and len(wd) > 2: + candidates = [(self.vocab[wd], wd, e)] + break + else: + candidates.append((self.vocab[wd], wd, e)) + if len(candidates) > 0: + # the smallest token_id is adopted + _, wd, e = sorted(candidates, key=lambda x: x[0])[0] + result.append(wd) + pos = e + else: + end = pos + 1 + wd = text[pos:end] + if check_simbol(wd): + result.append("") + elif checku2e(wd): + result.append("") + else: + for i in wd.encode("utf-8"): + result.append("<|byte%d|>" % i) + pos = end + return result + + def convert_id_to_token(self, index, breakline="\n"): + words = [] + byte_tokens = [] + word = self.ids_to_tokens[index][0] + if word[:6] == "<|byte" and word[-2:] == "|>": + byte_tokens.append(int(word[6:-2])) + else: + if len(byte_tokens) > 0: + words.append(bytearray(byte_tokens).decode("utf-8", errors="replace")) + byte_tokens = [] + if word[:7] == "<|emoji" and word[-2:] == "|>": + words.append(self.emoji["emoji_inv"][word]) + elif word == "": + words.append(" ") + elif word == "
": + words.append(breakline) + elif word == "": + words.append("\t") + elif word == "": + words.append("▀") + elif word == "": + words.append("ǀ") + elif word == "": + words.append("‖") + else: + words.append(word) + if len(byte_tokens) > 0: + words.append(bytearray(byte_tokens).decode("utf-8", errors="replace")) + text = "".join(words) + return text diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index 34a4cf335274fd..b656cee9c89bdc 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -2355,6 +2355,37 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class GPTNeoXJapaneseForCausalLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPTNeoXJapaneseLayer(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPTNeoXJapaneseModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class GPTNeoXJapanesePreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/src/transformers/utils/dummy_tokenizers_objects.py b/src/transformers/utils/dummy_tokenizers_objects.py index 755be5c48ae528..7a469bdff36126 100644 --- a/src/transformers/utils/dummy_tokenizers_objects.py +++ b/src/transformers/utils/dummy_tokenizers_objects.py @@ -171,6 +171,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) +class GPTNeoXJapaneseTokenizer(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + class HerbertTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] diff --git a/tests/models/gpt_neox_japanese/__init__.py b/tests/models/gpt_neox_japanese/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py b/tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py new file mode 100644 index 00000000000000..32f118ba06066b --- /dev/null +++ b/tests/models/gpt_neox_japanese/test_modeling_gpt_neox_japanese.py @@ -0,0 +1,255 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Testing suite for the PyTorch GPTNeoXJapanese model. """ + + +import unittest + +from transformers import GPTNeoXJapaneseConfig, is_torch_available +from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer +from transformers.testing_utils import require_torch, slow, torch_device + +from ...test_configuration_common import ConfigTester +from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask + + +if is_torch_available(): + import torch + + from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel + + +class GPTNeoXJapaneseModelTester: + def __init__( + self, + parent, + batch_size=13, + seq_length=7, + is_training=True, + use_input_mask=True, + use_token_type_ids=True, + use_labels=True, + vocab_size=99, + hidden_size=32, + num_hidden_layers=5, + num_attention_heads=4, + intermediate_multiple_size=4, + hidden_act="gelu", + hidden_dropout=0.0, + attention_dropout=0.1, + weight_tying=True, + max_position_embeddings=512, + type_vocab_size=16, + type_sequence_label_size=2, + initializer_range=0.02, + num_labels=3, + num_choices=4, + scope=None, + ): + self.parent = parent + self.batch_size = batch_size + self.seq_length = seq_length + self.is_training = is_training + self.use_input_mask = use_input_mask + self.use_token_type_ids = use_token_type_ids + self.use_labels = use_labels + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_multiple_size = intermediate_multiple_size + self.hidden_act = hidden_act + self.hidden_dropout = hidden_dropout + self.attention_dropout = attention_dropout + self.weight_tying = weight_tying + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.type_sequence_label_size = type_sequence_label_size + self.initializer_range = initializer_range + self.num_labels = num_labels + self.num_choices = num_choices + self.scope = scope + + def prepare_config_and_inputs(self): + input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) + + input_mask = None + if self.use_input_mask: + input_mask = random_attention_mask([self.batch_size, self.seq_length]) + + token_labels = None + if self.use_labels: + token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) + + config = self.get_config() + + return config, input_ids, input_mask, token_labels + + def get_config(self): + return GPTNeoXJapaneseConfig( + vocab_size=self.vocab_size, + hidden_size=self.hidden_size, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + intermediate_multiple_size=self.intermediate_multiple_size, + hidden_act=self.hidden_act, + hidden_dropout=self.hidden_dropout, + attention_dropout=self.attention_dropout, + weight_tying=self.weight_tying, + max_position_embeddings=self.max_position_embeddings, + type_vocab_size=self.type_vocab_size, + is_decoder=False, + initializer_range=self.initializer_range, + ) + + def prepare_config_and_inputs_for_decoder(self): + config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs() + + config.is_decoder = True + + return config, input_ids, input_mask, token_labels + + def create_and_check_model(self, config, input_ids, input_mask): + model = GPTNeoXJapaneseModel(config=config) + model.to(torch_device) + model.eval() + _ = model(input_ids, attention_mask=input_mask) + result = model(input_ids) + self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) + + def create_and_check_model_as_decoder(self, config, input_ids, input_mask): + config.add_cross_attention = True + model = GPTNeoXJapaneseModel(config) + model.to(torch_device) + model.eval() + result = model(input_ids, attention_mask=input_mask) + self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) + + def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels): + model = GPTNeoXJapaneseForCausalLM(config=config) + model.to(torch_device) + model.eval() + result = model(input_ids, attention_mask=input_mask, labels=token_labels) + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) + + def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask): + config.is_decoder = True + model = GPTNeoXJapaneseForCausalLM(config=config) + model.to(torch_device) + model.eval() + + # first forward pass + outputs = model(input_ids, attention_mask=input_mask, use_cache=True) + past_key_values = outputs.past_key_values + + # create hypothetical multiple next token and extent to next_input_ids + next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) + next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) + + # append to next input_ids and + next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) + next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) + + output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True) + output_from_no_past = output_from_no_past["hidden_states"][0] + output_from_past = model( + next_tokens, + attention_mask=next_attention_mask, + past_key_values=past_key_values, + output_hidden_states=True, + )["hidden_states"][0] + + # select random slice + random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() + output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() + output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() + + self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) + + # test that outputs are equal for slice + self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + config, input_ids, input_mask, token_labels = config_and_inputs + inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} + return config, inputs_dict + + +@require_torch +class GPTNeoXModelJapaneseTest(ModelTesterMixin, unittest.TestCase): + + all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () + all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () + test_pruning = False + test_missing_keys = False + test_model_parallel = False + test_head_masking = False + + def setUp(self): + self.model_tester = GPTNeoXJapaneseModelTester(self) + self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37) + + def test_config(self): + self.config_tester.run_common_tests() + + def test_model(self): + config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(config, input_ids, input_mask) + + def test_model_as_decoder(self): + config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder() + self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask) + + def test_model_as_decoder_with_default_input_mask(self): + # This regression test was failing with PyTorch < 1.3 + config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder() + + input_mask = None + + self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask) + + def test_decoder_model_past_large_inputs(self): + config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask) + + def test_model_for_causal_lm(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) + + @slow + def test_generation(self): + model_id = "abeja/gpt-neox-japanese-2.7b" + + prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"] + + EXPECTED_OUTPUTS = [ + "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。", + "100年後に必要とされる会社は、「人」が中心の会社です。", + "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。", + "国境の長いトンネルを抜けると、そこは雪国だった。", + "美味しい日本食といえば、やっぱりお寿司ですよね。", + ] + + tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id) + model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id) + + predicted_outputs = [] + for prompt in prompts: + input_ids = tokenizer(prompt, return_tensors="pt").input_ids + generated_ids = model.generate(input_ids, max_length=50) + generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) + predicted_outputs += generated_string + self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS) diff --git a/tests/models/gpt_neox_japanese/test_tokenization_gpt_neox_japanese.py b/tests/models/gpt_neox_japanese/test_tokenization_gpt_neox_japanese.py new file mode 100644 index 00000000000000..4af4da30a7b5e9 --- /dev/null +++ b/tests/models/gpt_neox_japanese/test_tokenization_gpt_neox_japanese.py @@ -0,0 +1,137 @@ +# coding=utf-8 +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import json +import os +import unittest + +from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import ( + VOCAB_FILES_NAMES, + GPTNeoXJapaneseTokenizer, +) +from transformers.testing_utils import require_tokenizers, slow + +from ...test_tokenization_common import TokenizerTesterMixin + + +@require_tokenizers +class GPTNeoXJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase): + + tokenizer_class = GPTNeoXJapaneseTokenizer + test_rust_tokenizer = False + from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False} + + def setUp(self): + super().setUp() + + vocab_tokens = [ + "こん", + "こんに", + "にちは", + "ばんは", + "世界,㔺界", + "、", + "。", + "
", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "<|emoji1|>", + "", + "<|startoftext|>", + "<|endoftext|>", + ] + emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}} # 😀 + self.special_tokens_map = {"unk_token": ""} + + self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) + self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"]) + with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: + vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) + with open(self.emoji_file, "w") as emoji_writer: + emoji_writer.write(json.dumps(emoji_tokens)) + + def get_tokenizer(self, **kwargs): + kwargs.update(self.special_tokens_map) + return GPTNeoXJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs) + + def get_input_output_texts(self, tokenizer): + input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀" + output_text = "こんにちは、世界。 \nこんばんは、世界。😀" + return input_text, output_text + + def get_clean_sequence(self, tokenizer): + input_text, output_text = self.get_input_output_texts(tokenizer) + ids = tokenizer.encode(output_text, add_special_tokens=False) + text = tokenizer.decode(ids, clean_up_tokenization_spaces=False) + return text, ids + + def test_pretokenized_inputs(self): + pass # TODO add if relevant + + def test_maximum_encoding_length_pair_input(self): + pass # TODO add if relevant + + def test_maximum_encoding_length_single_input(self): + pass # TODO add if relevant + + def test_full_tokenizer(self): + tokenizer = self.get_tokenizer() + + # Testing tokenization + input_text = "こんにちは、世界。 こんばんは、㔺界。" + expected_token = ["こん", "にちは", "、", "世界", "。", "", "こん", "ばんは", "、", "㔺界", "。"] + tokens = tokenizer.tokenize(input_text) + self.assertListEqual(tokens, expected_token) + + # Testing conversion to ids without special tokens + expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] + input_ids = tokenizer.convert_tokens_to_ids(tokens) + self.assertListEqual(input_ids, expected_ids) + + # Testing conversion to ids with special tokens + input_tokens = tokens + [tokenizer.unk_token] + expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] + input_ids = tokenizer.convert_tokens_to_ids(input_tokens) + self.assertListEqual(input_ids, expected_ids) + + @slow + def test_sequence_builders(self): + tokenizer = self.tokenizer_class.from_pretrained("abeja/gpt-neox-japanese-2.7b") + + ids_1 = tokenizer.encode("ありがとう。", add_special_tokens=False) + ids_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False) + + encoded_sentence = tokenizer.build_inputs_with_special_tokens(ids_1) + encoded_pair = tokenizer.build_inputs_with_special_tokens(ids_1, ids_2) + + assert encoded_sentence == ids_1 + assert encoded_pair == ids_1 + ids_2 + + def test_conversion_reversible(self): + # Intentionally convert some words to accommodate character fluctuations unique to Japanese + pass + + def test_padding_different_model_input_name(self): + # tokenizer has no padding token + pass From 4eb36f2921fed7d57aa9ff27a05942bd9402c6f0 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Wed, 14 Sep 2022 10:38:39 -0400 Subject: [PATCH 293/539] Mark right save_load test as slow (#19031) --- tests/models/vit_mae/test_modeling_tf_vit_mae.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/models/vit_mae/test_modeling_tf_vit_mae.py b/tests/models/vit_mae/test_modeling_tf_vit_mae.py index e0613b3b4b3c58..e9db7ea6b2729c 100644 --- a/tests/models/vit_mae/test_modeling_tf_vit_mae.py +++ b/tests/models/vit_mae/test_modeling_tf_vit_mae.py @@ -328,7 +328,6 @@ def test_compile_tf_model(self): # overwrite from common since TFViTMAEForPretraining has random masking, we need to fix the noise # to generate masks during test - @slow def test_keras_save_load(self): # make mask reproducible np.random.seed(2) @@ -376,6 +375,7 @@ def test_keras_save_load(self): # overwrite from common since TFViTMAEForPretraining has random masking, we need to fix the noise # to generate masks during test + @slow def test_save_load(self): # make mask reproducible np.random.seed(2) From 693ba2cc79ef46e8323b35f791233ce94f433b5d Mon Sep 17 00:00:00 2001 From: lewtun Date: Wed, 14 Sep 2022 17:53:42 +0200 Subject: [PATCH 294/539] Fix GPT-NeoX doc examples (#19033) --- src/transformers/models/gpt_neox/modeling_gpt_neox.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/transformers/models/gpt_neox/modeling_gpt_neox.py b/src/transformers/models/gpt_neox/modeling_gpt_neox.py index 569ead7bdf3f01..a3280137dcf04e 100755 --- a/src/transformers/models/gpt_neox/modeling_gpt_neox.py +++ b/src/transformers/models/gpt_neox/modeling_gpt_neox.py @@ -354,7 +354,7 @@ def forward( input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. - Indices can be obtained using [`GPTNeoXTokenizer`]. See [`PreTrainedTokenizer.encode`] and + Indices can be obtained using [`GPTNeoXTokenizerFast`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) @@ -601,13 +601,13 @@ def forward( Example: ```python - >>> from transformers import GPTNeoXTokenizer, GPTNeoXForCausalLM, GPTNeoXConfig + >>> from transformers import GPTNeoXTokenizerFast, GPTNeoXForCausalLM, GPTNeoXConfig >>> import torch - >>> tokenizer = GPTNeoXTokenizer.from_pretrained("gpt-neox-20b") - >>> config = GPTNeoXConfig.from_pretrained("gpt-neox-20b") + >>> tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b") + >>> config = GPTNeoXConfig.from_pretrained("EleutherAI/gpt-neox-20b") >>> config.is_decoder = True - >>> model = GPTNeoXForCausalLM.from_pretrained("gpt-neox-20b", config=config) + >>> model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/gpt-neox-20b", config=config) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) From 31be02f14b1724c677bb2e32a5101c7cb6448556 Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Wed, 14 Sep 2022 18:19:15 +0100 Subject: [PATCH 295/539] TF: tf.debugging assertions without tf.running_eagerly() protection (#19030) --- .../models/bart/modeling_tf_bart.py | 104 +++---- .../blenderbot/modeling_tf_blenderbot.py | 104 +++---- .../modeling_tf_blenderbot_small.py | 104 +++---- .../modeling_tf_encoder_decoder.py | 11 +- .../models/flaubert/modeling_tf_flaubert.py | 26 +- .../models/hubert/modeling_tf_hubert.py | 72 ++--- .../models/led/modeling_tf_led.py | 281 ++++++++---------- .../longformer/modeling_tf_longformer.py | 193 ++++++------ .../models/marian/modeling_tf_marian.py | 104 +++---- .../models/mbart/modeling_tf_mbart.py | 93 +++--- .../models/opt/modeling_tf_opt.py | 76 ++--- .../models/pegasus/modeling_tf_pegasus.py | 104 +++---- .../modeling_tf_speech_to_text.py | 102 +++---- .../modeling_tf_vision_encoder_decoder.py | 11 +- .../models/wav2vec2/modeling_tf_wav2vec2.py | 72 ++--- .../models/xglm/modeling_tf_xglm.py | 76 ++--- .../models/xlm/modeling_tf_xlm.py | 26 +- ...tf_{{cookiecutter.lowercase_modelname}}.py | 84 ++---- 18 files changed, 696 insertions(+), 947 deletions(-) diff --git a/src/transformers/models/bart/modeling_tf_bart.py b/src/transformers/models/bart/modeling_tf_bart.py index 35e8665984d786..3f448b4d2e163b 100644 --- a/src/transformers/models/bart/modeling_tf_bart.py +++ b/src/transformers/models/bart/modeling_tf_bart.py @@ -71,13 +71,12 @@ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_to shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids ) - if tf.executing_eagerly(): - # "Verify that `labels` has only positive values and -100" - assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) + # "Verify that `labels` has only positive values and -100" + assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) - # Make sure the assertion op is called by wrapping the result in an identity no-op - with tf.control_dependencies([assert_gte0]): - shifted_input_ids = tf.identity(shifted_input_ids) + # Make sure the assertion op is called by wrapping the result in an identity no-op + with tf.control_dependencies([assert_gte0]): + shifted_input_ids = tf.identity(shifted_input_ids) return shifted_input_ids @@ -229,31 +228,25 @@ def call( src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): + tf.debugging.assert_equal( + shape_list(attn_weights), + [bsz * self.num_heads, tgt_len, src_len], + message=( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {shape_list(attn_weights)}" + ), + ) + + if attention_mask is not None: tf.debugging.assert_equal( - shape_list(attn_weights), - [bsz * self.num_heads, tgt_len, src_len], + shape_list(attention_mask), + [bsz, 1, tgt_len, src_len], message=( - f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" - f" {shape_list(attn_weights)}" + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" + f" {shape_list(attention_mask)}" ), ) - if attention_mask is not None: - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attention_mask), - [bsz, 1, tgt_len, src_len], - message=( - f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" - f" {shape_list(attention_mask)}" - ), - ) - attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) @@ -261,17 +254,14 @@ def call( attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(layer_head_mask), - [self.num_heads], - message=( - f"Head mask for a single layer should be of size {(self.num_heads)}, but is" - f" {shape_list(layer_head_mask)}" - ), - ) + tf.debugging.assert_equal( + shape_list(layer_head_mask), + [self.num_heads], + message=( + f"Head mask for a single layer should be of size {(self.num_heads)}, but is" + f" {shape_list(layer_head_mask)}" + ), + ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) @@ -281,17 +271,14 @@ def call( attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attn_output), - [bsz * self.num_heads, tgt_len, self.head_dim], - message=( - f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" - f" {shape_list(attn_output)}" - ), - ) + tf.debugging.assert_equal( + shape_list(attn_output), + [bsz * self.num_heads, tgt_len, self.head_dim], + message=( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {shape_list(attn_output)}" + ), + ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) @@ -339,14 +326,11 @@ def call( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask ) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(hidden_states), - shape_list(residual), - message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", - ) + tf.debugging.assert_equal( + shape_list(hidden_states), + shape_list(residual), + message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", + ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states @@ -776,9 +760,7 @@ def call( all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if head_mask is not None and tf.executing_eagerly(): + if head_mask is not None: tf.debugging.assert_equal( shape_list(head_mask)[0], len(self.layers), @@ -983,10 +965,8 @@ def call( present_key_values = () if use_cache else None # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]: - if attn_mask is not None and tf.executing_eagerly(): + if attn_mask is not None: tf.debugging.assert_equal( shape_list(attn_mask)[0], len(self.layers), diff --git a/src/transformers/models/blenderbot/modeling_tf_blenderbot.py b/src/transformers/models/blenderbot/modeling_tf_blenderbot.py index 4bd4cd481b3e1d..7843e057f3c986 100644 --- a/src/transformers/models/blenderbot/modeling_tf_blenderbot.py +++ b/src/transformers/models/blenderbot/modeling_tf_blenderbot.py @@ -73,13 +73,12 @@ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_to shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids ) - if tf.executing_eagerly(): - # "Verify that `labels` has only positive values and -100" - assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) + # "Verify that `labels` has only positive values and -100" + assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) - # Make sure the assertion op is called by wrapping the result in an identity no-op - with tf.control_dependencies([assert_gte0]): - shifted_input_ids = tf.identity(shifted_input_ids) + # Make sure the assertion op is called by wrapping the result in an identity no-op + with tf.control_dependencies([assert_gte0]): + shifted_input_ids = tf.identity(shifted_input_ids) return shifted_input_ids @@ -225,31 +224,25 @@ def call( src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): + tf.debugging.assert_equal( + shape_list(attn_weights), + [bsz * self.num_heads, tgt_len, src_len], + message=( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {shape_list(attn_weights)}" + ), + ) + + if attention_mask is not None: tf.debugging.assert_equal( - shape_list(attn_weights), - [bsz * self.num_heads, tgt_len, src_len], + shape_list(attention_mask), + [bsz, 1, tgt_len, src_len], message=( - f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" - f" {shape_list(attn_weights)}" + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" + f" {shape_list(attention_mask)}" ), ) - if attention_mask is not None: - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attention_mask), - [bsz, 1, tgt_len, src_len], - message=( - f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" - f" {shape_list(attention_mask)}" - ), - ) - attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) @@ -257,17 +250,14 @@ def call( attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(layer_head_mask), - [self.num_heads], - message=( - f"Head mask for a single layer should be of size {(self.num_heads)}, but is" - f" {shape_list(layer_head_mask)}" - ), - ) + tf.debugging.assert_equal( + shape_list(layer_head_mask), + [self.num_heads], + message=( + f"Head mask for a single layer should be of size {(self.num_heads)}, but is" + f" {shape_list(layer_head_mask)}" + ), + ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) @@ -277,17 +267,14 @@ def call( attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attn_output), - [bsz * self.num_heads, tgt_len, self.head_dim], - message=( - f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" - f" {shape_list(attn_output)}" - ), - ) + tf.debugging.assert_equal( + shape_list(attn_output), + [bsz * self.num_heads, tgt_len, self.head_dim], + message=( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {shape_list(attn_output)}" + ), + ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) @@ -337,14 +324,11 @@ def call( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask ) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(hidden_states), - shape_list(residual), - message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", - ) + tf.debugging.assert_equal( + shape_list(hidden_states), + shape_list(residual), + message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", + ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states @@ -755,9 +739,7 @@ def call( all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if head_mask is not None and tf.executing_eagerly(): + if head_mask is not None: tf.debugging.assert_equal( shape_list(head_mask)[0], len(self.layers), @@ -966,10 +948,8 @@ def call( present_key_values = () if use_cache else None # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]: - if attn_mask is not None and tf.executing_eagerly(): + if attn_mask is not None: tf.debugging.assert_equal( shape_list(attn_mask)[0], len(self.layers), diff --git a/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py b/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py index 84eddc2cc35e1a..2f1a94ba9606ba 100644 --- a/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py +++ b/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py @@ -72,13 +72,12 @@ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_to shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids ) - if tf.executing_eagerly(): - # "Verify that `labels` has only positive values and -100" - assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) + # "Verify that `labels` has only positive values and -100" + assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) - # Make sure the assertion op is called by wrapping the result in an identity no-op - with tf.control_dependencies([assert_gte0]): - shifted_input_ids = tf.identity(shifted_input_ids) + # Make sure the assertion op is called by wrapping the result in an identity no-op + with tf.control_dependencies([assert_gte0]): + shifted_input_ids = tf.identity(shifted_input_ids) return shifted_input_ids @@ -225,31 +224,25 @@ def call( src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): + tf.debugging.assert_equal( + shape_list(attn_weights), + [bsz * self.num_heads, tgt_len, src_len], + message=( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {shape_list(attn_weights)}" + ), + ) + + if attention_mask is not None: tf.debugging.assert_equal( - shape_list(attn_weights), - [bsz * self.num_heads, tgt_len, src_len], + shape_list(attention_mask), + [bsz, 1, tgt_len, src_len], message=( - f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" - f" {shape_list(attn_weights)}" + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" + f" {shape_list(attention_mask)}" ), ) - if attention_mask is not None: - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attention_mask), - [bsz, 1, tgt_len, src_len], - message=( - f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" - f" {shape_list(attention_mask)}" - ), - ) - attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) @@ -257,17 +250,14 @@ def call( attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(layer_head_mask), - [self.num_heads], - message=( - f"Head mask for a single layer should be of size {(self.num_heads)}, but is" - f" {shape_list(layer_head_mask)}" - ), - ) + tf.debugging.assert_equal( + shape_list(layer_head_mask), + [self.num_heads], + message=( + f"Head mask for a single layer should be of size {(self.num_heads)}, but is" + f" {shape_list(layer_head_mask)}" + ), + ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) @@ -277,17 +267,14 @@ def call( attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attn_output), - [bsz * self.num_heads, tgt_len, self.head_dim], - message=( - f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" - f" {shape_list(attn_output)}" - ), - ) + tf.debugging.assert_equal( + shape_list(attn_output), + [bsz * self.num_heads, tgt_len, self.head_dim], + message=( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {shape_list(attn_output)}" + ), + ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) @@ -336,14 +323,11 @@ def call( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask ) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(hidden_states), - shape_list(residual), - message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", - ) + tf.debugging.assert_equal( + shape_list(hidden_states), + shape_list(residual), + message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", + ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states @@ -761,9 +745,7 @@ def call( all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if head_mask is not None and tf.executing_eagerly(): + if head_mask is not None: tf.debugging.assert_equal( shape_list(head_mask)[0], len(self.layers), @@ -968,10 +950,8 @@ def call( present_key_values = () if use_cache else None # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]: - if attn_mask is not None and tf.executing_eagerly(): + if attn_mask is not None: tf.debugging.assert_equal( shape_list(attn_mask)[0], len(self.layers), diff --git a/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py b/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py index c74865617cf76d..8c5965ba1deaaa 100644 --- a/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py +++ b/src/transformers/models/encoder_decoder/modeling_tf_encoder_decoder.py @@ -171,13 +171,12 @@ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_to shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids ) - if tf.executing_eagerly(): - # "Verify that `labels` has only positive values and -100" - assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) + # "Verify that `labels` has only positive values and -100" + assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) - # Make sure the assertion op is called by wrapping the result in an identity no-op - with tf.control_dependencies([assert_gte0]): - shifted_input_ids = tf.identity(shifted_input_ids) + # Make sure the assertion op is called by wrapping the result in an identity no-op + with tf.control_dependencies([assert_gte0]): + shifted_input_ids = tf.identity(shifted_input_ids) return shifted_input_ids diff --git a/src/transformers/models/flaubert/modeling_tf_flaubert.py b/src/transformers/models/flaubert/modeling_tf_flaubert.py index d475c5774a6bc3..b33e057232485d 100644 --- a/src/transformers/models/flaubert/modeling_tf_flaubert.py +++ b/src/transformers/models/flaubert/modeling_tf_flaubert.py @@ -200,9 +200,9 @@ def get_masks(slen, lengths, causal, padding_mask=None): # sanity check # assert shape_list(mask) == [bs, slen] - if tf.executing_eagerly(): - tf.debugging.assert_equal(shape_list(mask), [bs, slen]) - assert causal is False or shape_list(attn_mask) == [bs, slen, slen] + tf.debugging.assert_equal(shape_list(mask), [bs, slen]) + if causal: + tf.debugging.assert_equal(shape_list(attn_mask), [bs, slen, slen]) return mask, attn_mask @@ -517,10 +517,9 @@ def call( # check inputs # assert shape_list(lengths)[0] == bs - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(lengths)[0], bs - ), f"Expected batch size {shape_list(lengths)[0]} and received batch size {bs} mismatched" + tf.debugging.assert_equal( + shape_list(lengths)[0], bs + ), f"Expected batch size {shape_list(lengths)[0]} and received batch size {bs} mismatched" # assert lengths.max().item() <= slen # input_ids = input_ids.transpose(0, 1) # batch size as dimension 0 # assert (src_enc is None) == (src_len is None) @@ -538,15 +537,14 @@ def call( position_ids = tf.expand_dims(tf.range(slen), axis=0) position_ids = tf.tile(position_ids, (bs, 1)) - if tf.executing_eagerly(): - # assert shape_list(position_ids) == [bs, slen] # (slen, bs) - tf.debugging.assert_equal( - shape_list(position_ids), [bs, slen] - ), f"Position id shape {shape_list(position_ids)} and input shape {[bs, slen]} mismatched" - # position_ids = position_ids.transpose(0, 1) + # assert shape_list(position_ids) == [bs, slen] # (slen, bs) + tf.debugging.assert_equal( + shape_list(position_ids), [bs, slen] + ), f"Position id shape {shape_list(position_ids)} and input shape {[bs, slen]} mismatched" + # position_ids = position_ids.transpose(0, 1) # langs - if langs is not None and tf.executing_eagerly(): + if langs is not None: # assert shape_list(langs) == [bs, slen] # (slen, bs) tf.debugging.assert_equal( shape_list(langs), [bs, slen] diff --git a/src/transformers/models/hubert/modeling_tf_hubert.py b/src/transformers/models/hubert/modeling_tf_hubert.py index b3aab9a1e857e7..e9cf9adb76f674 100644 --- a/src/transformers/models/hubert/modeling_tf_hubert.py +++ b/src/transformers/models/hubert/modeling_tf_hubert.py @@ -816,31 +816,25 @@ def call( src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): + tf.debugging.assert_equal( + shape_list(attn_weights), + [bsz * self.num_heads, tgt_len, src_len], + message=( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {shape_list(attn_weights)}" + ), + ) + + if attention_mask is not None: tf.debugging.assert_equal( - shape_list(attn_weights), - [bsz * self.num_heads, tgt_len, src_len], + shape_list(attention_mask), + [bsz, 1, tgt_len, src_len], message=( - f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" - f" {shape_list(attn_weights)}" + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" + f" {shape_list(attention_mask)}" ), ) - if attention_mask is not None: - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attention_mask), - [bsz, 1, tgt_len, src_len], - message=( - f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" - f" {shape_list(attention_mask)}" - ), - ) - attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) @@ -848,17 +842,14 @@ def call( attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(layer_head_mask), - [self.num_heads], - message=( - f"Head mask for a single layer should be of size {(self.num_heads)}, but is" - f" {shape_list(layer_head_mask)}" - ), - ) + tf.debugging.assert_equal( + shape_list(layer_head_mask), + [self.num_heads], + message=( + f"Head mask for a single layer should be of size {(self.num_heads)}, but is" + f" {shape_list(layer_head_mask)}" + ), + ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) @@ -868,17 +859,14 @@ def call( attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attn_output), - [bsz * self.num_heads, tgt_len, self.head_dim], - message=( - f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" - f" {shape_list(attn_output)}" - ), - ) + tf.debugging.assert_equal( + shape_list(attn_output), + [bsz * self.num_heads, tgt_len, self.head_dim], + message=( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {shape_list(attn_output)}" + ), + ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) diff --git a/src/transformers/models/led/modeling_tf_led.py b/src/transformers/models/led/modeling_tf_led.py index c677581635f93b..a91f769fbf5f43 100644 --- a/src/transformers/models/led/modeling_tf_led.py +++ b/src/transformers/models/led/modeling_tf_led.py @@ -64,12 +64,11 @@ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_to ) # "Verify that `labels` has only positive values and -100" - if tf.executing_eagerly(): - assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0)) + assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0)) - # Make sure the assertion op is called by wrapping the result in an identity no-op - with tf.control_dependencies([assert_gte0]): - shifted_input_ids = tf.identity(shifted_input_ids) + # Make sure the assertion op is called by wrapping the result in an identity no-op + with tf.control_dependencies([assert_gte0]): + shifted_input_ids = tf.identity(shifted_input_ids) return shifted_input_ids @@ -213,12 +212,11 @@ def call( value_vectors = self.value(hidden_states) batch_size, seq_len, embed_dim = shape_list(hidden_states) - if tf.executing_eagerly(): - tf.debugging.assert_equal( - embed_dim, - self.embed_dim, - message=f"hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}", - ) + tf.debugging.assert_equal( + embed_dim, + self.embed_dim, + message=f"hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}", + ) # normalize query query_vectors /= tf.math.sqrt(tf.cast(self.head_dim, dtype=query_vectors.dtype)) @@ -245,15 +243,14 @@ def call( # pad local attention probs attn_scores += diagonal_mask - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attn_scores), - [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1], - message=( - f"attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads}," - f" {self.one_sided_attn_window_size * 2 + 1}), but is of size {shape_list(attn_scores)}" - ), - ) + tf.debugging.assert_equal( + shape_list(attn_scores), + [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1], + message=( + f"attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads}," + f" {self.one_sided_attn_window_size * 2 + 1}), but is of size {shape_list(attn_scores)}" + ), + ) # compute global attn indices required through out forward fn ( @@ -301,15 +298,14 @@ def call( ) if layer_head_mask is not None: - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(layer_head_mask), - [self.num_heads], - message=( - f"Head mask for a single layer should be of size {(self.num_heads)}, but is" - f" {shape_list(layer_head_mask)}" - ), - ) + tf.debugging.assert_equal( + shape_list(layer_head_mask), + [self.num_heads], + message=( + f"Head mask for a single layer should be of size {(self.num_heads)}, but is" + f" {shape_list(layer_head_mask)}" + ), + ) attn_probs = tf.reshape(layer_head_mask, (1, 1, -1, 1)) * attn_probs @@ -332,12 +328,9 @@ def call( ), ) - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attn_output), - [batch_size, seq_len, self.num_heads, self.head_dim], - message="Unexpected size", - ) + tf.debugging.assert_equal( + shape_list(attn_output), [batch_size, seq_len, self.num_heads, self.head_dim], message="Unexpected size" + ) attn_output = tf.reshape(attn_output, (batch_size, seq_len, embed_dim)) @@ -392,20 +385,19 @@ def _sliding_chunks_query_key_matmul(self, query, key, window_overlap): """ batch_size, seq_len, num_heads, head_dim = shape_list(query) - if tf.executing_eagerly(): - tf.debugging.assert_equal( - seq_len % (window_overlap * 2), - 0, - message=f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}", - ) - tf.debugging.assert_equal( - shape_list(query), - shape_list(key), - message=( - f"Shape of query and key should be equal, but got query: {shape_list(query)} and key:" - f" {shape_list(key)}" - ), - ) + tf.debugging.assert_equal( + seq_len % (window_overlap * 2), + 0, + message=f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}", + ) + tf.debugging.assert_equal( + shape_list(query), + shape_list(key), + message=( + f"Shape of query and key should be equal, but got query: {shape_list(query)} and key:" + f" {shape_list(key)}" + ), + ) chunks_count = seq_len // window_overlap - 1 @@ -539,22 +531,19 @@ def _sliding_chunks_matmul_attn_probs_value(self, attn_probs, value, window_over batch_size, seq_len, num_heads, head_dim = shape_list(value) - if tf.executing_eagerly(): - tf.debugging.assert_equal( - seq_len % (window_overlap * 2), - 0, - message="Seq_len has to be multiple of 2 * window_overlap", - ) - tf.debugging.assert_equal( - shape_list(attn_probs)[:3], - shape_list(value)[:3], - message="value and attn_probs must have same dims (except head_dim)", - ) - tf.debugging.assert_equal( - shape_list(attn_probs)[3], - 2 * window_overlap + 1, - message="attn_probs last dim has to be 2 * window_overlap + 1", - ) + tf.debugging.assert_equal( + seq_len % (window_overlap * 2), 0, message="Seq_len has to be multiple of 2 * window_overlap" + ) + tf.debugging.assert_equal( + shape_list(attn_probs)[:3], + shape_list(value)[:3], + message="value and attn_probs must have same dims (except head_dim)", + ) + tf.debugging.assert_equal( + shape_list(attn_probs)[3], + 2 * window_overlap + 1, + message="attn_probs last dim has to be 2 * window_overlap + 1", + ) chunks_count = seq_len // window_overlap - 1 @@ -592,12 +581,11 @@ def _sliding_chunks_matmul_attn_probs_value(self, attn_probs, value, window_over (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim), ) - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(chunked_value), - [batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim], - message="Chunked value has the wrong shape", - ) + tf.debugging.assert_equal( + shape_list(chunked_value), + [batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim], + message="Chunked value has the wrong shape", + ) chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs) context = tf.einsum("bcwd,bcdh->bcwh", chunked_attn_probs, chunked_value) @@ -685,15 +673,14 @@ def _chunk(hidden_states, window_overlap): # chunk with overlap chunked_hidden_states = tf.signal.frame(hidden_states, frame_size, frame_hop_size) - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(chunked_hidden_states), - [batch_size, num_output_chunks, frame_size], - message=( - "Make sure chunking is correctly applied. `Chunked hidden states should have output dimension" - f" {[batch_size, frame_size, num_output_chunks]}, but got {shape_list(chunked_hidden_states)}." - ), - ) + tf.debugging.assert_equal( + shape_list(chunked_hidden_states), + [batch_size, num_output_chunks, frame_size], + message=( + "Make sure chunking is correctly applied. `Chunked hidden states should have output dimension" + f" {[batch_size, frame_size, num_output_chunks]}, but got {shape_list(chunked_hidden_states)}." + ), + ) chunked_hidden_states = tf.reshape( chunked_hidden_states, @@ -866,16 +853,15 @@ def _compute_global_attn_output_from_hidden( # compute attn scores global_attn_scores = tf.matmul(global_query_vectors_only_global, global_key_vectors, transpose_b=True) - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(global_attn_scores), - [batch_size * self.num_heads, max_num_global_attn_indices, seq_len], - message=( - "global_attn_scores have the wrong size. Size should be" - f" {(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)}, but is" - f" {shape_list(global_attn_scores)}." - ), - ) + tf.debugging.assert_equal( + shape_list(global_attn_scores), + [batch_size * self.num_heads, max_num_global_attn_indices, seq_len], + message=( + "global_attn_scores have the wrong size. Size should be" + f" {(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)}, but is" + f" {shape_list(global_attn_scores)}." + ), + ) global_attn_scores = tf.reshape( global_attn_scores, @@ -909,15 +895,14 @@ def _compute_global_attn_output_from_hidden( # apply layer head masking if layer_head_mask is not None: - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(layer_head_mask), - [self.num_heads], - message=( - f"Head mask for a single layer should be of size {(self.num_heads)}, but is" - f" {shape_list(layer_head_mask)}" - ), - ) + tf.debugging.assert_equal( + shape_list(layer_head_mask), + [self.num_heads], + message=( + f"Head mask for a single layer should be of size {(self.num_heads)}, but is" + f" {shape_list(layer_head_mask)}" + ), + ) global_attn_probs_float = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( global_attn_probs_float, (batch_size, self.num_heads, max_num_global_attn_indices, seq_len) ) @@ -931,16 +916,15 @@ def _compute_global_attn_output_from_hidden( # global attn output global_attn_output = tf.matmul(global_attn_probs, global_value_vectors) - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(global_attn_output), - [batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim], - message=( - "global_attn_output tensor has the wrong size. Size should be" - f" {(batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim)}, but is" - f" {shape_list(global_attn_output)}." - ), - ) + tf.debugging.assert_equal( + shape_list(global_attn_output), + [batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim], + message=( + "global_attn_output tensor has the wrong size. Size should be" + f" {(batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim)}, but is" + f" {shape_list(global_attn_output)}." + ), + ) global_attn_output = tf.reshape( global_attn_output, @@ -1091,27 +1075,25 @@ def call( src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) - if tf.executing_eagerly(): + tf.debugging.assert_equal( + shape_list(attn_weights), + [bsz * self.num_heads, tgt_len, src_len], + message=( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {shape_list(attn_weights)}" + ), + ) + + if attention_mask is not None: tf.debugging.assert_equal( - shape_list(attn_weights), - [bsz * self.num_heads, tgt_len, src_len], + shape_list(attention_mask), + [bsz, 1, tgt_len, src_len], message=( - f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" - f" {shape_list(attn_weights)}" + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" + f" {shape_list(attention_mask)}" ), ) - if attention_mask is not None: - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attention_mask), - [bsz, 1, tgt_len, src_len], - message=( - f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" - f" {shape_list(attention_mask)}" - ), - ) - attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + tf.cast( attention_mask, dtype=attn_weights.dtype ) @@ -1120,15 +1102,14 @@ def call( attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(layer_head_mask), - [self.num_heads], - message=( - f"Head mask for a single layer should be of size {(self.num_heads)}, but is" - f" {shape_list(layer_head_mask)}" - ), - ) + tf.debugging.assert_equal( + shape_list(layer_head_mask), + [self.num_heads], + message=( + f"Head mask for a single layer should be of size {(self.num_heads)}, but is" + f" {shape_list(layer_head_mask)}" + ), + ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) @@ -1139,15 +1120,14 @@ def call( attn_output = tf.matmul(attn_probs, value_states) - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attn_output), - [bsz * self.num_heads, tgt_len, self.head_dim], - message=( - f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" - f" {shape_list(attn_output)}" - ), - ) + tf.debugging.assert_equal( + shape_list(attn_output), + [bsz * self.num_heads, tgt_len, self.head_dim], + message=( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {shape_list(attn_output)}" + ), + ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) @@ -1199,12 +1179,11 @@ def call( hidden_states = layer_outputs[0] - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(hidden_states), - shape_list(residual), - message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", - ) + tf.debugging.assert_equal( + shape_list(hidden_states), + shape_list(residual), + message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", + ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states @@ -1792,7 +1771,7 @@ def call( all_attentions = all_global_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired - if head_mask is not None and tf.executing_eagerly(): + if head_mask is not None: tf.debugging.assert_equal( shape_list(head_mask)[0], len(self.layers), @@ -2055,7 +2034,7 @@ def call( present_key_values = () # check if head_mask has a correct number of layers specified if desired - if head_mask is not None and tf.executing_eagerly(): + if head_mask is not None: tf.debugging.assert_equal( shape_list(head_mask)[0], len(self.layers), diff --git a/src/transformers/models/longformer/modeling_tf_longformer.py b/src/transformers/models/longformer/modeling_tf_longformer.py index 6b491638cc5f78..51b8cca0b07d9a 100644 --- a/src/transformers/models/longformer/modeling_tf_longformer.py +++ b/src/transformers/models/longformer/modeling_tf_longformer.py @@ -738,12 +738,11 @@ def call( value_vectors = self.value(hidden_states) batch_size, seq_len, embed_dim = shape_list(hidden_states) - if tf.executing_eagerly(): - tf.debugging.assert_equal( - embed_dim, - self.embed_dim, - message=f"hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}", - ) + tf.debugging.assert_equal( + embed_dim, + self.embed_dim, + message=f"hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}", + ) # normalize query query_vectors /= tf.math.sqrt(tf.cast(self.head_dim, dtype=query_vectors.dtype)) @@ -770,15 +769,14 @@ def call( # pad local attention probs attn_scores += diagonal_mask - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attn_scores), - [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1], - message=( - f"attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads}," - f" {self.one_sided_attn_window_size * 2 + 1}), but is of size {shape_list(attn_scores)}" - ), - ) + tf.debugging.assert_equal( + shape_list(attn_scores), + [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1], + message=( + f"attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads}," + f" {self.one_sided_attn_window_size * 2 + 1}), but is of size {shape_list(attn_scores)}" + ), + ) # compute global attn indices required through out forward fn ( @@ -826,15 +824,14 @@ def call( ) if layer_head_mask is not None: - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(layer_head_mask), - [self.num_heads], - message=( - f"Head mask for a single layer should be of size {(self.num_heads)}, but is" - f" {shape_list(layer_head_mask)}" - ), - ) + tf.debugging.assert_equal( + shape_list(layer_head_mask), + [self.num_heads], + message=( + f"Head mask for a single layer should be of size {(self.num_heads)}, but is" + f" {shape_list(layer_head_mask)}" + ), + ) attn_probs = tf.reshape(layer_head_mask, (1, 1, -1, 1)) * attn_probs @@ -857,12 +854,9 @@ def call( ), ) - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attn_output), - [batch_size, seq_len, self.num_heads, self.head_dim], - message="Unexpected size", - ) + tf.debugging.assert_equal( + shape_list(attn_output), [batch_size, seq_len, self.num_heads, self.head_dim], message="Unexpected size" + ) attn_output = tf.reshape(attn_output, (batch_size, seq_len, embed_dim)) @@ -917,20 +911,19 @@ def _sliding_chunks_query_key_matmul(self, query, key, window_overlap): """ batch_size, seq_len, num_heads, head_dim = shape_list(query) - if tf.executing_eagerly(): - tf.debugging.assert_equal( - seq_len % (window_overlap * 2), - 0, - message=f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}", - ) - tf.debugging.assert_equal( - shape_list(query), - shape_list(key), - message=( - f"Shape of query and key should be equal, but got query: {shape_list(query)} and key:" - f" {shape_list(key)}" - ), - ) + tf.debugging.assert_equal( + seq_len % (window_overlap * 2), + 0, + message=f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}", + ) + tf.debugging.assert_equal( + shape_list(query), + shape_list(key), + message=( + f"Shape of query and key should be equal, but got query: {shape_list(query)} and key:" + f" {shape_list(key)}" + ), + ) chunks_count = seq_len // window_overlap - 1 @@ -1064,22 +1057,19 @@ def _sliding_chunks_matmul_attn_probs_value(self, attn_probs, value, window_over batch_size, seq_len, num_heads, head_dim = shape_list(value) - if tf.executing_eagerly(): - tf.debugging.assert_equal( - seq_len % (window_overlap * 2), - 0, - message="Seq_len has to be multiple of 2 * window_overlap", - ) - tf.debugging.assert_equal( - shape_list(attn_probs)[:3], - shape_list(value)[:3], - message="value and attn_probs must have same dims (except head_dim)", - ) - tf.debugging.assert_equal( - shape_list(attn_probs)[3], - 2 * window_overlap + 1, - message="attn_probs last dim has to be 2 * window_overlap + 1", - ) + tf.debugging.assert_equal( + seq_len % (window_overlap * 2), 0, message="Seq_len has to be multiple of 2 * window_overlap" + ) + tf.debugging.assert_equal( + shape_list(attn_probs)[:3], + shape_list(value)[:3], + message="value and attn_probs must have same dims (except head_dim)", + ) + tf.debugging.assert_equal( + shape_list(attn_probs)[3], + 2 * window_overlap + 1, + message="attn_probs last dim has to be 2 * window_overlap + 1", + ) chunks_count = seq_len // window_overlap - 1 @@ -1117,12 +1107,11 @@ def _sliding_chunks_matmul_attn_probs_value(self, attn_probs, value, window_over (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim), ) - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(chunked_value), - [batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim], - message="Chunked value has the wrong shape", - ) + tf.debugging.assert_equal( + shape_list(chunked_value), + [batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim], + message="Chunked value has the wrong shape", + ) chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs) context = tf.einsum("bcwd,bcdh->bcwh", chunked_attn_probs, chunked_value) @@ -1210,15 +1199,14 @@ def _chunk(hidden_states, window_overlap): # chunk with overlap chunked_hidden_states = tf.signal.frame(hidden_states, frame_size, frame_hop_size) - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(chunked_hidden_states), - [batch_size, num_output_chunks, frame_size], - message=( - "Make sure chunking is correctly applied. `Chunked hidden states should have output dimension" - f" {[batch_size, frame_size, num_output_chunks]}, but got {shape_list(chunked_hidden_states)}." - ), - ) + tf.debugging.assert_equal( + shape_list(chunked_hidden_states), + [batch_size, num_output_chunks, frame_size], + message=( + "Make sure chunking is correctly applied. `Chunked hidden states should have output dimension" + f" {[batch_size, frame_size, num_output_chunks]}, but got {shape_list(chunked_hidden_states)}." + ), + ) chunked_hidden_states = tf.reshape( chunked_hidden_states, @@ -1391,16 +1379,15 @@ def _compute_global_attn_output_from_hidden( # compute attn scores global_attn_scores = tf.matmul(global_query_vectors_only_global, global_key_vectors, transpose_b=True) - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(global_attn_scores), - [batch_size * self.num_heads, max_num_global_attn_indices, seq_len], - message=( - "global_attn_scores have the wrong size. Size should be" - f" {(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)}, but is" - f" {shape_list(global_attn_scores)}." - ), - ) + tf.debugging.assert_equal( + shape_list(global_attn_scores), + [batch_size * self.num_heads, max_num_global_attn_indices, seq_len], + message=( + "global_attn_scores have the wrong size. Size should be" + f" {(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)}, but is" + f" {shape_list(global_attn_scores)}." + ), + ) global_attn_scores = tf.reshape( global_attn_scores, @@ -1434,15 +1421,14 @@ def _compute_global_attn_output_from_hidden( # apply layer head masking if layer_head_mask is not None: - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(layer_head_mask), - [self.num_heads], - message=( - f"Head mask for a single layer should be of size {(self.num_heads)}, but is" - f" {shape_list(layer_head_mask)}" - ), - ) + tf.debugging.assert_equal( + shape_list(layer_head_mask), + [self.num_heads], + message=( + f"Head mask for a single layer should be of size {(self.num_heads)}, but is" + f" {shape_list(layer_head_mask)}" + ), + ) global_attn_probs_float = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( global_attn_probs_float, (batch_size, self.num_heads, max_num_global_attn_indices, seq_len) ) @@ -1456,16 +1442,15 @@ def _compute_global_attn_output_from_hidden( # global attn output global_attn_output = tf.matmul(global_attn_probs, global_value_vectors) - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(global_attn_output), - [batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim], - message=( - "global_attn_output tensor has the wrong size. Size should be" - f" {(batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim)}, but is" - f" {shape_list(global_attn_output)}." - ), - ) + tf.debugging.assert_equal( + shape_list(global_attn_output), + [batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim], + message=( + "global_attn_output tensor has the wrong size. Size should be" + f" {(batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim)}, but is" + f" {shape_list(global_attn_output)}." + ), + ) global_attn_output = tf.reshape( global_attn_output, diff --git a/src/transformers/models/marian/modeling_tf_marian.py b/src/transformers/models/marian/modeling_tf_marian.py index 27fceae6d9a7dd..01522346d8fe29 100644 --- a/src/transformers/models/marian/modeling_tf_marian.py +++ b/src/transformers/models/marian/modeling_tf_marian.py @@ -72,13 +72,12 @@ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_to shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids ) - if tf.executing_eagerly(): - # "Verify that `labels` has only positive values and -100" - assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) + # "Verify that `labels` has only positive values and -100" + assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) - # Make sure the assertion op is called by wrapping the result in an identity no-op - with tf.control_dependencies([assert_gte0]): - shifted_input_ids = tf.identity(shifted_input_ids) + # Make sure the assertion op is called by wrapping the result in an identity no-op + with tf.control_dependencies([assert_gte0]): + shifted_input_ids = tf.identity(shifted_input_ids) return shifted_input_ids @@ -264,31 +263,25 @@ def call( src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): + tf.debugging.assert_equal( + shape_list(attn_weights), + [bsz * self.num_heads, tgt_len, src_len], + message=( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {shape_list(attn_weights)}" + ), + ) + + if attention_mask is not None: tf.debugging.assert_equal( - shape_list(attn_weights), - [bsz * self.num_heads, tgt_len, src_len], + shape_list(attention_mask), + [bsz, 1, tgt_len, src_len], message=( - f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" - f" {shape_list(attn_weights)}" + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" + f" {shape_list(attention_mask)}" ), ) - if attention_mask is not None: - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attention_mask), - [bsz, 1, tgt_len, src_len], - message=( - f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" - f" {shape_list(attention_mask)}" - ), - ) - attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) @@ -296,17 +289,14 @@ def call( attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(layer_head_mask), - [self.num_heads], - message=( - f"Head mask for a single layer should be of size {(self.num_heads)}, but is" - f" {shape_list(layer_head_mask)}" - ), - ) + tf.debugging.assert_equal( + shape_list(layer_head_mask), + [self.num_heads], + message=( + f"Head mask for a single layer should be of size {(self.num_heads)}, but is" + f" {shape_list(layer_head_mask)}" + ), + ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) @@ -316,17 +306,14 @@ def call( attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attn_output), - [bsz * self.num_heads, tgt_len, self.head_dim], - message=( - f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" - f" {shape_list(attn_output)}" - ), - ) + tf.debugging.assert_equal( + shape_list(attn_output), + [bsz * self.num_heads, tgt_len, self.head_dim], + message=( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {shape_list(attn_output)}" + ), + ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) @@ -375,14 +362,11 @@ def call( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask ) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(hidden_states), - shape_list(residual), - message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", - ) + tf.debugging.assert_equal( + shape_list(hidden_states), + shape_list(residual), + message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", + ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states @@ -801,9 +785,7 @@ def call( all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if head_mask is not None and tf.executing_eagerly(): + if head_mask is not None: tf.debugging.assert_equal( shape_list(head_mask)[0], len(self.layers), @@ -1009,10 +991,8 @@ def call( present_key_values = () if use_cache else None # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. for attn_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]: - if attn_mask is not None and tf.executing_eagerly(): + if attn_mask is not None: tf.debugging.assert_equal( shape_list(attn_mask)[0], len(self.layers), diff --git a/src/transformers/models/mbart/modeling_tf_mbart.py b/src/transformers/models/mbart/modeling_tf_mbart.py index 91cc3373283f18..0b7a81aa33c18b 100644 --- a/src/transformers/models/mbart/modeling_tf_mbart.py +++ b/src/transformers/models/mbart/modeling_tf_mbart.py @@ -232,31 +232,25 @@ def call( src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): + tf.debugging.assert_equal( + shape_list(attn_weights), + [bsz * self.num_heads, tgt_len, src_len], + message=( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {shape_list(attn_weights)}" + ), + ) + + if attention_mask is not None: tf.debugging.assert_equal( - shape_list(attn_weights), - [bsz * self.num_heads, tgt_len, src_len], + shape_list(attention_mask), + [bsz, 1, tgt_len, src_len], message=( - f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" - f" {shape_list(attn_weights)}" + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" + f" {shape_list(attention_mask)}" ), ) - if attention_mask is not None: - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attention_mask), - [bsz, 1, tgt_len, src_len], - message=( - f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" - f" {shape_list(attention_mask)}" - ), - ) - attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) @@ -264,17 +258,14 @@ def call( attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(layer_head_mask), - [self.num_heads], - message=( - f"Head mask for a single layer should be of size {(self.num_heads)}, but is" - f" {shape_list(layer_head_mask)}" - ), - ) + tf.debugging.assert_equal( + shape_list(layer_head_mask), + [self.num_heads], + message=( + f"Head mask for a single layer should be of size {(self.num_heads)}, but is" + f" {shape_list(layer_head_mask)}" + ), + ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) @@ -284,17 +275,14 @@ def call( attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attn_output), - [bsz * self.num_heads, tgt_len, self.head_dim], - message=( - f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" - f" {shape_list(attn_output)}" - ), - ) + tf.debugging.assert_equal( + shape_list(attn_output), + [bsz * self.num_heads, tgt_len, self.head_dim], + message=( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {shape_list(attn_output)}" + ), + ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) @@ -343,14 +331,11 @@ def call( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask ) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(hidden_states), - shape_list(residual), - message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", - ) + tf.debugging.assert_equal( + shape_list(hidden_states), + shape_list(residual), + message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", + ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states @@ -786,9 +771,7 @@ def call( all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if head_mask is not None and tf.executing_eagerly(): + if head_mask is not None: tf.debugging.assert_equal( shape_list(head_mask)[0], len(self.layers), @@ -1001,10 +984,8 @@ def call( present_key_values = () if use_cache else None # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]: - if attn_mask is not None and tf.executing_eagerly(): + if attn_mask is not None: tf.debugging.assert_equal( shape_list(attn_mask)[0], len(self.layers), diff --git a/src/transformers/models/opt/modeling_tf_opt.py b/src/transformers/models/opt/modeling_tf_opt.py index 535a616db39e9a..8be1a8f091a29c 100644 --- a/src/transformers/models/opt/modeling_tf_opt.py +++ b/src/transformers/models/opt/modeling_tf_opt.py @@ -206,31 +206,25 @@ def call( src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): + tf.debugging.assert_equal( + shape_list(attn_weights), + [bsz * self.num_heads, tgt_len, src_len], + message=( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {shape_list(attn_weights)}" + ), + ) + + if attention_mask is not None: tf.debugging.assert_equal( - shape_list(attn_weights), - [bsz * self.num_heads, tgt_len, src_len], + shape_list(attention_mask), + [bsz, 1, tgt_len, src_len], message=( - f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" - f" {shape_list(attn_weights)}" + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" + f" {shape_list(attention_mask)}" ), ) - if attention_mask is not None: - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attention_mask), - [bsz, 1, tgt_len, src_len], - message=( - f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" - f" {shape_list(attention_mask)}" - ), - ) - attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) @@ -238,17 +232,14 @@ def call( attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(layer_head_mask), - [self.num_heads], - message=( - f"Head mask for a single layer should be of size {(self.num_heads)}, but is" - f" {shape_list(layer_head_mask)}" - ), - ) + tf.debugging.assert_equal( + shape_list(layer_head_mask), + [self.num_heads], + message=( + f"Head mask for a single layer should be of size {(self.num_heads)}, but is" + f" {shape_list(layer_head_mask)}" + ), + ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) @@ -258,17 +249,14 @@ def call( attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attn_output), - [bsz * self.num_heads, tgt_len, self.head_dim], - message=( - f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" - f" {shape_list(attn_output)}" - ), - ) + tf.debugging.assert_equal( + shape_list(attn_output), + [bsz * self.num_heads, tgt_len, self.head_dim], + message=( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {shape_list(attn_output)}" + ), + ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) @@ -664,10 +652,8 @@ def call( present_key_values = () if use_cache else None # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. for attn_mask_name, attn_mask in [("head_mask", head_mask)]: - if attn_mask is not None and tf.executing_eagerly(): + if attn_mask is not None: tf.debugging.assert_equal( shape_list(attn_mask)[0], len(self.layers), diff --git a/src/transformers/models/pegasus/modeling_tf_pegasus.py b/src/transformers/models/pegasus/modeling_tf_pegasus.py index 35d59c6b125607..63017efb0353a4 100644 --- a/src/transformers/models/pegasus/modeling_tf_pegasus.py +++ b/src/transformers/models/pegasus/modeling_tf_pegasus.py @@ -72,13 +72,12 @@ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_to shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids ) - if tf.executing_eagerly(): - # "Verify that `labels` has only positive values and -100" - assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) + # "Verify that `labels` has only positive values and -100" + assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) - # Make sure the assertion op is called by wrapping the result in an identity no-op - with tf.control_dependencies([assert_gte0]): - shifted_input_ids = tf.identity(shifted_input_ids) + # Make sure the assertion op is called by wrapping the result in an identity no-op + with tf.control_dependencies([assert_gte0]): + shifted_input_ids = tf.identity(shifted_input_ids) return shifted_input_ids @@ -265,31 +264,25 @@ def call( src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): + tf.debugging.assert_equal( + shape_list(attn_weights), + [bsz * self.num_heads, tgt_len, src_len], + message=( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {shape_list(attn_weights)}" + ), + ) + + if attention_mask is not None: tf.debugging.assert_equal( - shape_list(attn_weights), - [bsz * self.num_heads, tgt_len, src_len], + shape_list(attention_mask), + [bsz, 1, tgt_len, src_len], message=( - f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" - f" {shape_list(attn_weights)}" + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" + f" {shape_list(attention_mask)}" ), ) - if attention_mask is not None: - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attention_mask), - [bsz, 1, tgt_len, src_len], - message=( - f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" - f" {shape_list(attention_mask)}" - ), - ) - attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) @@ -297,17 +290,14 @@ def call( attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(layer_head_mask), - [self.num_heads], - message=( - f"Head mask for a single layer should be of size {(self.num_heads)}, but is" - f" {shape_list(layer_head_mask)}" - ), - ) + tf.debugging.assert_equal( + shape_list(layer_head_mask), + [self.num_heads], + message=( + f"Head mask for a single layer should be of size {(self.num_heads)}, but is" + f" {shape_list(layer_head_mask)}" + ), + ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) @@ -317,17 +307,14 @@ def call( attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attn_output), - [bsz * self.num_heads, tgt_len, self.head_dim], - message=( - f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" - f" {shape_list(attn_output)}" - ), - ) + tf.debugging.assert_equal( + shape_list(attn_output), + [bsz * self.num_heads, tgt_len, self.head_dim], + message=( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {shape_list(attn_output)}" + ), + ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) @@ -377,14 +364,11 @@ def call( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask ) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(hidden_states), - shape_list(residual), - message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", - ) + tf.debugging.assert_equal( + shape_list(hidden_states), + shape_list(residual), + message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", + ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states @@ -804,9 +788,7 @@ def call( all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if head_mask is not None and tf.executing_eagerly(): + if head_mask is not None: tf.debugging.assert_equal( shape_list(head_mask)[0], len(self.layers), @@ -1015,10 +997,8 @@ def call( present_key_values = () if use_cache else None # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]: - if attn_mask is not None and tf.executing_eagerly(): + if attn_mask is not None: tf.debugging.assert_equal( shape_list(attn_mask)[0], len(self.layers), diff --git a/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py b/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py index b269b2cb85aedd..ccae37b3eb5378 100755 --- a/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py +++ b/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py @@ -74,13 +74,12 @@ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_to shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids ) - if tf.executing_eagerly(): - # "Verify that `labels` has only positive values and -100" - assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) + # "Verify that `labels` has only positive values and -100" + assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) - # Make sure the assertion op is called by wrapping the result in an identity no-op - with tf.control_dependencies([assert_gte0]): - shifted_input_ids = tf.identity(shifted_input_ids) + # Make sure the assertion op is called by wrapping the result in an identity no-op + with tf.control_dependencies([assert_gte0]): + shifted_input_ids = tf.identity(shifted_input_ids) return shifted_input_ids @@ -324,31 +323,25 @@ def call( src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): + tf.debugging.assert_equal( + shape_list(attn_weights), + [bsz * self.num_heads, tgt_len, src_len], + message=( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {shape_list(attn_weights)}" + ), + ) + + if attention_mask is not None: tf.debugging.assert_equal( - shape_list(attn_weights), - [bsz * self.num_heads, tgt_len, src_len], + shape_list(attention_mask), + [bsz, 1, tgt_len, src_len], message=( - f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" - f" {shape_list(attn_weights)}" + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" + f" {shape_list(attention_mask)}" ), ) - if attention_mask is not None: - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attention_mask), - [bsz, 1, tgt_len, src_len], - message=( - f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" - f" {shape_list(attention_mask)}" - ), - ) - attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) @@ -356,17 +349,14 @@ def call( attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(layer_head_mask), - [self.num_heads], - message=( - f"Head mask for a single layer should be of size {(self.num_heads)}, but is" - f" {shape_list(layer_head_mask)}" - ), - ) + tf.debugging.assert_equal( + shape_list(layer_head_mask), + [self.num_heads], + message=( + f"Head mask for a single layer should be of size {(self.num_heads)}, but is" + f" {shape_list(layer_head_mask)}" + ), + ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) @@ -376,17 +366,14 @@ def call( attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attn_output), - [bsz * self.num_heads, tgt_len, self.head_dim], - message=( - f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" - f" {shape_list(attn_output)}" - ), - ) + tf.debugging.assert_equal( + shape_list(attn_output), + [bsz * self.num_heads, tgt_len, self.head_dim], + message=( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {shape_list(attn_output)}" + ), + ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) @@ -434,14 +421,11 @@ def call( training=training, ) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(hidden_states), - shape_list(residual), - message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", - ) + tf.debugging.assert_equal( + shape_list(hidden_states), + shape_list(residual), + message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", + ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states @@ -866,8 +850,7 @@ def call( all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired - # The tf.debugging asserts are not compliant with XLA then they have to be disabled in other modes than eager. - if head_mask is not None and tf.executing_eagerly(): + if head_mask is not None: tf.debugging.assert_equal( shape_list(head_mask)[0], len(self.layers), @@ -1068,9 +1051,8 @@ def call( next_decoder_cache = () if use_cache else None # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired - # The tf.debugging asserts are not compliant with XLA then they have to be disabled in other modes than eager. for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]: - if attn_mask is not None and tf.executing_eagerly(): + if attn_mask is not None: tf.debugging.assert_equal( shape_list(attn_mask)[0], len(self.layers), diff --git a/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py b/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py index 45924e2666c90e..54a105a932bf10 100644 --- a/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py +++ b/src/transformers/models/vision_encoder_decoder/modeling_tf_vision_encoder_decoder.py @@ -161,13 +161,12 @@ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_to shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids ) - if tf.executing_eagerly(): - # "Verify that `labels` has only positive values and -100" - assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) + # "Verify that `labels` has only positive values and -100" + assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) - # Make sure the assertion op is called by wrapping the result in an identity no-op - with tf.control_dependencies([assert_gte0]): - shifted_input_ids = tf.identity(shifted_input_ids) + # Make sure the assertion op is called by wrapping the result in an identity no-op + with tf.control_dependencies([assert_gte0]): + shifted_input_ids = tf.identity(shifted_input_ids) return shifted_input_ids diff --git a/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py b/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py index 9e80886ee55ca4..58110b51207d16 100644 --- a/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py +++ b/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py @@ -852,31 +852,25 @@ def call( src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): + tf.debugging.assert_equal( + shape_list(attn_weights), + [bsz * self.num_heads, tgt_len, src_len], + message=( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {shape_list(attn_weights)}" + ), + ) + + if attention_mask is not None: tf.debugging.assert_equal( - shape_list(attn_weights), - [bsz * self.num_heads, tgt_len, src_len], + shape_list(attention_mask), + [bsz, 1, tgt_len, src_len], message=( - f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" - f" {shape_list(attn_weights)}" + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" + f" {shape_list(attention_mask)}" ), ) - if attention_mask is not None: - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attention_mask), - [bsz, 1, tgt_len, src_len], - message=( - f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" - f" {shape_list(attention_mask)}" - ), - ) - attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) @@ -884,17 +878,14 @@ def call( attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(layer_head_mask), - [self.num_heads], - message=( - f"Head mask for a single layer should be of size {(self.num_heads)}, but is" - f" {shape_list(layer_head_mask)}" - ), - ) + tf.debugging.assert_equal( + shape_list(layer_head_mask), + [self.num_heads], + message=( + f"Head mask for a single layer should be of size {(self.num_heads)}, but is" + f" {shape_list(layer_head_mask)}" + ), + ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) @@ -904,17 +895,14 @@ def call( attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attn_output), - [bsz * self.num_heads, tgt_len, self.head_dim], - message=( - f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" - f" {shape_list(attn_output)}" - ), - ) + tf.debugging.assert_equal( + shape_list(attn_output), + [bsz * self.num_heads, tgt_len, self.head_dim], + message=( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {shape_list(attn_output)}" + ), + ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) diff --git a/src/transformers/models/xglm/modeling_tf_xglm.py b/src/transformers/models/xglm/modeling_tf_xglm.py index 7ed399ac29ae1d..ac11e7ae7c68c9 100644 --- a/src/transformers/models/xglm/modeling_tf_xglm.py +++ b/src/transformers/models/xglm/modeling_tf_xglm.py @@ -239,31 +239,25 @@ def call( src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): + tf.debugging.assert_equal( + shape_list(attn_weights), + [bsz * self.num_heads, tgt_len, src_len], + message=( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {shape_list(attn_weights)}" + ), + ) + + if attention_mask is not None: tf.debugging.assert_equal( - shape_list(attn_weights), - [bsz * self.num_heads, tgt_len, src_len], + shape_list(attention_mask), + [bsz, 1, tgt_len, src_len], message=( - f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" - f" {shape_list(attn_weights)}" + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" + f" {shape_list(attention_mask)}" ), ) - if attention_mask is not None: - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attention_mask), - [bsz, 1, tgt_len, src_len], - message=( - f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" - f" {shape_list(attention_mask)}" - ), - ) - attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) @@ -271,17 +265,14 @@ def call( attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(layer_head_mask), - [self.num_heads], - message=( - f"Head mask for a single layer should be of size {(self.num_heads)}, but is" - f" {shape_list(layer_head_mask)}" - ), - ) + tf.debugging.assert_equal( + shape_list(layer_head_mask), + [self.num_heads], + message=( + f"Head mask for a single layer should be of size {(self.num_heads)}, but is" + f" {shape_list(layer_head_mask)}" + ), + ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) @@ -291,17 +282,14 @@ def call( attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attn_output), - [bsz * self.num_heads, tgt_len, self.head_dim], - message=( - f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" - f" {shape_list(attn_output)}" - ), - ) + tf.debugging.assert_equal( + shape_list(attn_output), + [bsz * self.num_heads, tgt_len, self.head_dim], + message=( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {shape_list(attn_output)}" + ), + ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) @@ -568,10 +556,8 @@ def call( next_decoder_cache = () if use_cache else None # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]: - if attn_mask is not None and tf.executing_eagerly(): + if attn_mask is not None: tf.debugging.assert_equal( shape_list(attn_mask)[0], len(self.layers), diff --git a/src/transformers/models/xlm/modeling_tf_xlm.py b/src/transformers/models/xlm/modeling_tf_xlm.py index c472ecbeabf847..a060986a5704ac 100644 --- a/src/transformers/models/xlm/modeling_tf_xlm.py +++ b/src/transformers/models/xlm/modeling_tf_xlm.py @@ -105,9 +105,9 @@ def get_masks(slen, lengths, causal, padding_mask=None): # sanity check # assert shape_list(mask) == [bs, slen] - if tf.executing_eagerly(): - tf.debugging.assert_equal(shape_list(mask), [bs, slen]) - assert causal is False or shape_list(attn_mask) == [bs, slen, slen] + tf.debugging.assert_equal(shape_list(mask), [bs, slen]) + if causal: + tf.debugging.assert_equal(shape_list(attn_mask), [bs, slen, slen]) return mask, attn_mask @@ -384,10 +384,9 @@ def call( # check inputs # assert shape_list(lengths)[0] == bs - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(lengths)[0], bs - ), f"Expected batch size {shape_list(lengths)[0]} and received batch size {bs} mismatched" + tf.debugging.assert_equal( + shape_list(lengths)[0], bs + ), f"Expected batch size {shape_list(lengths)[0]} and received batch size {bs} mismatched" # assert lengths.max().item() <= slen # input_ids = input_ids.transpose(0, 1) # batch size as dimension 0 # assert (src_enc is None) == (src_len is None) @@ -405,15 +404,14 @@ def call( position_ids = tf.expand_dims(tf.range(slen), axis=0) position_ids = tf.tile(position_ids, (bs, 1)) - if tf.executing_eagerly(): - # assert shape_list(position_ids) == [bs, slen] # (slen, bs) - tf.debugging.assert_equal( - shape_list(position_ids), [bs, slen] - ), f"Position id shape {shape_list(position_ids)} and input shape {[bs, slen]} mismatched" - # position_ids = position_ids.transpose(0, 1) + # assert shape_list(position_ids) == [bs, slen] # (slen, bs) + tf.debugging.assert_equal( + shape_list(position_ids), [bs, slen] + ), f"Position id shape {shape_list(position_ids)} and input shape {[bs, slen]} mismatched" + # position_ids = position_ids.transpose(0, 1) # langs - if langs is not None and tf.executing_eagerly(): + if langs is not None: # assert shape_list(langs) == [bs, slen] # (slen, bs) tf.debugging.assert_equal( shape_list(langs), [bs, slen] diff --git a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py index c92b80144c263c..0d025ca98c50d4 100644 --- a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py +++ b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py @@ -1693,13 +1693,12 @@ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_to shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids ) - if tf.executing_eagerly(): - # "Verify that `labels` has only positive values and -100" - assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0)) + # "Verify that `labels` has only positive values and -100" + assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0)) - # Make sure the assertion op is called by wrapping the result in an identity no-op - with tf.control_dependencies([assert_gte0]): - shifted_input_ids = tf.identity(shifted_input_ids) + # Make sure the assertion op is called by wrapping the result in an identity no-op + with tf.control_dependencies([assert_gte0]): + shifted_input_ids = tf.identity(shifted_input_ids) return shifted_input_ids @@ -1837,24 +1836,18 @@ def call( src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attn_weights), - [bsz * self.num_heads, tgt_len, src_len], - message=f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {shape_list(attn_weights)}", - ) + tf.debugging.assert_equal( + shape_list(attn_weights), + [bsz * self.num_heads, tgt_len, src_len], + message=f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {shape_list(attn_weights)}", + ) if attention_mask is not None: - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attention_mask), - [bsz, 1, tgt_len, src_len], - message=f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {shape_list(attention_mask)}", - ) + tf.debugging.assert_equal( + shape_list(attention_mask), + [bsz, 1, tgt_len, src_len], + message=f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {shape_list(attention_mask)}", + ) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) @@ -1862,14 +1855,11 @@ def call( attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(layer_head_mask), - [self.num_heads], - message=f"Head mask for a single layer should be of size {(self.num_heads)}, but is {shape_list(layer_head_mask)}", - ) + tf.debugging.assert_equal( + shape_list(layer_head_mask), + [self.num_heads], + message=f"Head mask for a single layer should be of size {(self.num_heads)}, but is {shape_list(layer_head_mask)}", + ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) @@ -1880,14 +1870,11 @@ def call( attn_output = tf.matmul(attn_probs, value_states) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(attn_output), - [bsz * self.num_heads, tgt_len, self.head_dim], - message=f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {shape_list(attn_output)}", - ) + tf.debugging.assert_equal( + shape_list(attn_output), + [bsz * self.num_heads, tgt_len, self.head_dim], + message=f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {shape_list(attn_output)}", + ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) @@ -1929,14 +1916,11 @@ def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_m hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask ) - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if tf.executing_eagerly(): - tf.debugging.assert_equal( - shape_list(hidden_states), - shape_list(residual), - message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", - ) + tf.debugging.assert_equal( + shape_list(hidden_states), + shape_list(residual), + message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", + ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states @@ -2332,9 +2316,7 @@ def call( all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. - if head_mask is not None and tf.executing_eagerly(): + if head_mask is not None: tf.debugging.assert_equal( shape_list(head_mask)[0], len(self.layers), @@ -2529,10 +2511,8 @@ def call( present_key_values = () if use_cache else None # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired - # The tf.debugging asserts are not compliant with XLA then they - # have to be disabled in other modes than eager. for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]: - if attn_mask is not None and tf.executing_eagerly(): + if attn_mask is not None: tf.debugging.assert_equal( shape_list(attn_mask)[0], len(self.layers), From 0e24548081f8b1c933e3f5f9d8abac8c5a471117 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Wed, 14 Sep 2022 13:28:40 -0400 Subject: [PATCH 296/539] Add safeguards for CUDA kernel load in Deformable DETR (#19037) --- .../models/deformable_detr/modeling_deformable_detr.py | 10 +++++++--- src/transformers/utils/__init__.py | 1 + src/transformers/utils/import_utils.py | 4 ++++ 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/src/transformers/models/deformable_detr/modeling_deformable_detr.py b/src/transformers/models/deformable_detr/modeling_deformable_detr.py index acd4d4012470ff..5038cd391c2b3c 100755 --- a/src/transformers/models/deformable_detr/modeling_deformable_detr.py +++ b/src/transformers/models/deformable_detr/modeling_deformable_detr.py @@ -41,7 +41,7 @@ ) from ...modeling_outputs import BaseModelOutput from ...modeling_utils import PreTrainedModel -from ...utils import logging +from ...utils import is_ninja_available, logging from .configuration_deformable_detr import DeformableDetrConfig from .load_custom import load_cuda_kernels @@ -49,9 +49,13 @@ logger = logging.get_logger(__name__) # Move this to not compile only when importing, this needs to happen later, like in __init__. -if is_torch_cuda_available(): +if is_torch_cuda_available() and is_ninja_available(): logger.info("Loading custom CUDA kernels...") - MultiScaleDeformableAttention = load_cuda_kernels() + try: + MultiScaleDeformableAttention = load_cuda_kernels() + except Exception as e: + logger.warning(f"Could not load the custom kernel for multi-scale deformable attention: {e}") + MultiScaleDeformableAttention = None else: MultiScaleDeformableAttention = None diff --git a/src/transformers/utils/__init__.py b/src/transformers/utils/__init__.py index 44c3e1807860f6..9572a673f67181 100644 --- a/src/transformers/utils/__init__.py +++ b/src/transformers/utils/__init__.py @@ -98,6 +98,7 @@ is_in_notebook, is_ipex_available, is_librosa_available, + is_ninja_available, is_onnx_available, is_pandas_available, is_phonemizer_available, diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index d0dff20a9719d2..f2cf5ffd9bff41 100644 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -471,6 +471,10 @@ def is_apex_available(): return importlib.util.find_spec("apex") is not None +def is_ninja_available(): + return importlib.util.find_spec("ninja") is not None + + def is_ipex_available(): def get_major_and_minor_from_version(full_version): return str(version.parse(full_version).major) + "." + str(version.parse(full_version).minor) From 0efbb6e93e9bb5307e1925746980e102b94e7254 Mon Sep 17 00:00:00 2001 From: SaulLu <55560583+SaulLu@users.noreply.github.com> Date: Wed, 14 Sep 2022 19:32:12 +0200 Subject: [PATCH 297/539] fix GPT2 token's `special_tokens_mask` when used with `add_bos_token=True` (#19036) --- .../models/gpt2/tokenization_gpt2.py | 32 +++++++++++++++++++ tests/models/gpt2/test_tokenization_gpt2.py | 25 +++++++++++++++ 2 files changed, 57 insertions(+) diff --git a/src/transformers/models/gpt2/tokenization_gpt2.py b/src/transformers/models/gpt2/tokenization_gpt2.py index b480eca0c062ce..1be35fbfdf4919 100644 --- a/src/transformers/models/gpt2/tokenization_gpt2.py +++ b/src/transformers/models/gpt2/tokenization_gpt2.py @@ -261,6 +261,38 @@ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): return output + bos_token_ids + token_ids_1 + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + if not self.add_bos_token: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=False + ) + + if token_ids_1 is None: + return [1] + ([0] * len(token_ids_0)) + return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + def _tokenize(self, text): """Tokenize a string.""" bpe_tokens = [] diff --git a/tests/models/gpt2/test_tokenization_gpt2.py b/tests/models/gpt2/test_tokenization_gpt2.py index d76bc75ccbd582..3b2272d8548bc7 100644 --- a/tests/models/gpt2/test_tokenization_gpt2.py +++ b/tests/models/gpt2/test_tokenization_gpt2.py @@ -250,3 +250,28 @@ def test_add_bos_token_slow(self): # tokenizer has no padding token def test_padding_different_model_input_name(self): pass + + def test_special_tokens_mask_input_pairs_and_bos_token(self): + # TODO: change to self.get_tokenizers() when the fast version is implemented + tokenizers = [self.get_tokenizer(do_lower_case=False, add_bos_token=True)] + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + sequence_0 = "Encode this." + sequence_1 = "This one too please." + encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False) + encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False) + encoded_sequence_dict = tokenizer.encode_plus( + sequence_0, + sequence_1, + add_special_tokens=True, + return_special_tokens_mask=True, + ) + encoded_sequence_w_special = encoded_sequence_dict["input_ids"] + special_tokens_mask = encoded_sequence_dict["special_tokens_mask"] + self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special)) + + filtered_sequence = [ + (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special) + ] + filtered_sequence = [x for x in filtered_sequence if x is not None] + self.assertEqual(encoded_sequence, filtered_sequence) From 377401016188b21c50d6b5d62c72cb0c7ecb6ce0 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Wed, 14 Sep 2022 14:06:49 -0400 Subject: [PATCH 298/539] Automate check for new pipelines and metadata update (#19029) * Automate check for new pipelines and metadata update * Add Datasets to quality extra --- .circleci/config.yml | 1 + Makefile | 1 + setup.py | 2 +- utils/update_metadata.py | 33 ++++++++++++++++++++++++++++++++- 4 files changed, 35 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index a8fcedad3f47c4..556a97479cef6a 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -982,6 +982,7 @@ jobs: - run: python utils/check_config_docstrings.py - run: make deps_table_check_updated - run: python utils/tests_fetcher.py --sanity_check + - run: python utils/update_metadata.py --check-only run_tests_layoutlmv2_and_v3: working_directory: ~/transformers diff --git a/Makefile b/Makefile index 6c6200cfe72800..999ddd6ee15605 100644 --- a/Makefile +++ b/Makefile @@ -41,6 +41,7 @@ repo-consistency: python utils/check_inits.py python utils/check_config_docstrings.py python utils/tests_fetcher.py --sanity_check + python utils/update_metadata.py --check-only # this target runs checks on all files diff --git a/setup.py b/setup.py index 68a62b785d2f04..799509cc9d2d31 100644 --- a/setup.py +++ b/setup.py @@ -307,7 +307,7 @@ def run(self): extras["deepspeed-testing"] = extras["deepspeed"] + extras["testing"] + extras["optuna"] -extras["quality"] = deps_list("black", "isort", "flake8", "GitPython", "hf-doc-builder") +extras["quality"] = deps_list("black", "datasets", "isort", "flake8", "GitPython", "hf-doc-builder") extras["all"] = ( extras["tf"] diff --git a/utils/update_metadata.py b/utils/update_metadata.py index 7eb0294cda9e33..5ccd07aeb70c40 100644 --- a/utils/update_metadata.py +++ b/utils/update_metadata.py @@ -85,6 +85,12 @@ "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForDocumentQuestionAnswering", ), + ( + "visual-question-answering", + "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES", + "AutoModelForVisualQuestionAnswering", + ), + ("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"), ] @@ -236,10 +242,35 @@ def update_metadata(token, commit_sha): repo.push_to_hub(commit_message) +def check_pipeline_tags(): + in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} + pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS + missing = [] + for key in pipeline_tasks: + if key not in in_table: + model = pipeline_tasks[key]["pt"] + if isinstance(model, (list, tuple)): + model = model[0] + model = model.__name__ + if model not in in_table.values(): + missing.append(key) + + if len(missing) > 0: + msg = ", ".join(missing) + raise ValueError( + "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside " + f"`utils/update_metadata.py`: {msg}. Please add them!" + ) + + if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.") parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.") + parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.") args = parser.parse_args() - update_metadata(args.token, args.commit_sha) + if args.check_only: + check_pipeline_tags() + else: + update_metadata(args.token, args.commit_sha) From 16913b3c9215f592f1240511f8da271dc07c3552 Mon Sep 17 00:00:00 2001 From: Lysandre Date: Wed, 14 Sep 2022 14:58:20 -0400 Subject: [PATCH 299/539] Dev version --- README.md | 16 ++++++++-------- examples/flax/question-answering/run_qa.py | 2 +- .../flax/text-classification/run_flax_glue.py | 2 +- .../flax/token-classification/run_flax_ner.py | 2 +- .../run_audio_classification.py | 2 +- .../pytorch/contrastive-image-text/run_clip.py | 2 +- .../run_image_classification.py | 2 +- .../run_image_classification_no_trainer.py | 2 +- examples/pytorch/image-pretraining/run_mae.py | 2 +- examples/pytorch/image-pretraining/run_mim.py | 2 +- examples/pytorch/language-modeling/run_clm.py | 2 +- .../language-modeling/run_clm_no_trainer.py | 2 +- examples/pytorch/language-modeling/run_mlm.py | 2 +- .../language-modeling/run_mlm_no_trainer.py | 2 +- examples/pytorch/language-modeling/run_plm.py | 2 +- examples/pytorch/multiple-choice/run_swag.py | 2 +- .../multiple-choice/run_swag_no_trainer.py | 2 +- examples/pytorch/question-answering/run_qa.py | 2 +- .../question-answering/run_qa_beam_search.py | 2 +- .../run_qa_beam_search_no_trainer.py | 2 +- .../question-answering/run_qa_no_trainer.py | 2 +- .../pytorch/question-answering/run_seq2seq_qa.py | 2 +- .../run_semantic_segmentation.py | 2 +- .../run_semantic_segmentation_no_trainer.py | 2 +- .../run_speech_recognition_ctc.py | 2 +- .../run_speech_recognition_seq2seq.py | 2 +- .../pytorch/summarization/run_summarization.py | 2 +- .../run_summarization_no_trainer.py | 2 +- examples/pytorch/text-classification/run_glue.py | 2 +- .../text-classification/run_glue_no_trainer.py | 2 +- examples/pytorch/text-classification/run_xnli.py | 2 +- examples/pytorch/token-classification/run_ner.py | 2 +- .../token-classification/run_ner_no_trainer.py | 2 +- examples/pytorch/translation/run_translation.py | 2 +- .../translation/run_translation_no_trainer.py | 2 +- examples/tensorflow/multiple-choice/run_swag.py | 2 +- examples/tensorflow/question-answering/run_qa.py | 2 +- .../summarization/run_summarization.py | 2 +- .../tensorflow/text-classification/run_glue.py | 2 +- .../tensorflow/translation/run_translation.py | 2 +- setup.py | 2 +- src/transformers/__init__.py | 2 +- 42 files changed, 49 insertions(+), 49 deletions(-) diff --git a/README.md b/README.md index abadb9f57406f1..6071d7885d0218 100644 --- a/README.md +++ b/README.md @@ -285,18 +285,18 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[DeBERTa](https://huggingface.co/docs/transformers/model_doc/deberta)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. 1. **[DeBERTa-v2](https://huggingface.co/docs/transformers/model_doc/deberta-v2)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. 1. **[Decision Transformer](https://huggingface.co/docs/transformers/model_doc/decision_transformer)** (from Berkeley/Facebook/Google) released with the paper [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) by Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch. -1. **[Deformable DETR](https://huggingface.co/docs/transformers/main/model_doc/deformable_detr)** (from SenseTime Research) released with the paper [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) by Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, Jifeng Dai. +1. **[Deformable DETR](https://huggingface.co/docs/transformers/model_doc/deformable_detr)** (from SenseTime Research) released with the paper [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) by Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, Jifeng Dai. 1. **[DeiT](https://huggingface.co/docs/transformers/model_doc/deit)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou. 1. **[DETR](https://huggingface.co/docs/transformers/model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko. 1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan. 1. **[DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation) and a German version of DistilBERT. 1. **[DiT](https://huggingface.co/docs/transformers/model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei. -1. **[Donut](https://huggingface.co/docs/transformers/main/model_doc/donut)** (from NAVER), released together with the paper [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) by Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park. +1. **[Donut](https://huggingface.co/docs/transformers/model_doc/donut)** (from NAVER), released together with the paper [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) by Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park. 1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun. 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. 1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. -1. **[ERNIE](https://huggingface.co/docs/transformers/main/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu. +1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu. 1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab. 1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. 1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon. @@ -305,7 +305,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever. 1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy. 1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach -1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/main/model_doc/gpt_neox_japanese)** (from ABEJA) released by Shinya Otani, Takayoshi Makabe, Anuj Arora, and Kyo Hattori. +1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/model_doc/gpt_neox_japanese)** (from ABEJA) released by Shinya Otani, Takayoshi Makabe, Anuj Arora, and Kyo Hattori. 1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**. 1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (from EleutherAI) released in the repository [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki. 1. **[GroupViT](https://huggingface.co/docs/transformers/model_doc/groupvit)** (from UCSD, NVIDIA) released with the paper [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) by Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang. @@ -342,7 +342,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al. 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu. -1. **[PEGASUS-X](https://huggingface.co/docs/transformers/main/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, and Peter J. Liu. +1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, and Peter J. Liu. 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira. 1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen. 1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang. @@ -365,7 +365,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[Splinter](https://huggingface.co/docs/transformers/model_doc/splinter)** (from Tel Aviv University), released together with the paper [Few-Shot Question Answering by Pretraining Span Selection](https://arxiv.org/abs/2101.00438) by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy. 1. **[SqueezeBERT](https://huggingface.co/docs/transformers/model_doc/squeezebert)** (from Berkeley) released with the paper [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer. 1. **[Swin Transformer](https://huggingface.co/docs/transformers/model_doc/swin)** (from Microsoft) released with the paper [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo. -1. **[Swin Transformer V2](https://huggingface.co/docs/transformers/main/model_doc/swinv2)** (from Microsoft) released with the paper [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883) by Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo. +1. **[Swin Transformer V2](https://huggingface.co/docs/transformers/model_doc/swinv2)** (from Microsoft) released with the paper [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883) by Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo. 1. **[T5](https://huggingface.co/docs/transformers/model_doc/t5)** (from Google AI) released with the paper [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu. 1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu. 1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos. @@ -377,7 +377,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang. 1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu. 1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/abs/2202.09741) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu. -1. **[VideoMAE](https://huggingface.co/docs/transformers/main/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang. +1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang. 1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim. 1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby. 1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang. @@ -386,7 +386,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino. 1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli. 1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei. -1. **[X-CLIP](https://huggingface.co/docs/transformers/main/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling. +1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling. 1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li. 1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau. 1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou. diff --git a/examples/flax/question-answering/run_qa.py b/examples/flax/question-answering/run_qa.py index 1b951e35839816..c044ff628c8abb 100644 --- a/examples/flax/question-answering/run_qa.py +++ b/examples/flax/question-answering/run_qa.py @@ -61,7 +61,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") Array = Any Dataset = datasets.arrow_dataset.Dataset diff --git a/examples/flax/text-classification/run_flax_glue.py b/examples/flax/text-classification/run_flax_glue.py index e0dfab2f52e994..29c2b511fcaa4f 100755 --- a/examples/flax/text-classification/run_flax_glue.py +++ b/examples/flax/text-classification/run_flax_glue.py @@ -54,7 +54,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") Array = Any Dataset = datasets.arrow_dataset.Dataset diff --git a/examples/flax/token-classification/run_flax_ner.py b/examples/flax/token-classification/run_flax_ner.py index ad68c0997fed81..929e249f446a99 100644 --- a/examples/flax/token-classification/run_flax_ner.py +++ b/examples/flax/token-classification/run_flax_ner.py @@ -55,7 +55,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt") diff --git a/examples/pytorch/audio-classification/run_audio_classification.py b/examples/pytorch/audio-classification/run_audio_classification.py index 9ebd4fb00759f5..e7d7f30a67dd04 100644 --- a/examples/pytorch/audio-classification/run_audio_classification.py +++ b/examples/pytorch/audio-classification/run_audio_classification.py @@ -45,7 +45,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt") diff --git a/examples/pytorch/contrastive-image-text/run_clip.py b/examples/pytorch/contrastive-image-text/run_clip.py index d3c5355f9d07cf..797c1c1fb90d00 100644 --- a/examples/pytorch/contrastive-image-text/run_clip.py +++ b/examples/pytorch/contrastive-image-text/run_clip.py @@ -54,7 +54,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/contrastive-image-text/requirements.txt") diff --git a/examples/pytorch/image-classification/run_image_classification.py b/examples/pytorch/image-classification/run_image_classification.py index 28000015ab173a..eb20c9efedf77c 100644 --- a/examples/pytorch/image-classification/run_image_classification.py +++ b/examples/pytorch/image-classification/run_image_classification.py @@ -55,7 +55,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt") diff --git a/examples/pytorch/image-classification/run_image_classification_no_trainer.py b/examples/pytorch/image-classification/run_image_classification_no_trainer.py index b6e27de0dea057..902ba962986e88 100644 --- a/examples/pytorch/image-classification/run_image_classification_no_trainer.py +++ b/examples/pytorch/image-classification/run_image_classification_no_trainer.py @@ -53,7 +53,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") logger = get_logger(__name__) diff --git a/examples/pytorch/image-pretraining/run_mae.py b/examples/pytorch/image-pretraining/run_mae.py index 3ac4106b11acbf..9e8c366d3d2728 100644 --- a/examples/pytorch/image-pretraining/run_mae.py +++ b/examples/pytorch/image-pretraining/run_mae.py @@ -43,7 +43,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt") diff --git a/examples/pytorch/image-pretraining/run_mim.py b/examples/pytorch/image-pretraining/run_mim.py index 7626e8be363253..c05c1b1333f606 100644 --- a/examples/pytorch/image-pretraining/run_mim.py +++ b/examples/pytorch/image-pretraining/run_mim.py @@ -48,7 +48,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt") diff --git a/examples/pytorch/language-modeling/run_clm.py b/examples/pytorch/language-modeling/run_clm.py index ca992c04562e5e..f0a63ef732d14d 100755 --- a/examples/pytorch/language-modeling/run_clm.py +++ b/examples/pytorch/language-modeling/run_clm.py @@ -54,7 +54,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") diff --git a/examples/pytorch/language-modeling/run_clm_no_trainer.py b/examples/pytorch/language-modeling/run_clm_no_trainer.py index 731aff7acbccab..3e1c9048334455 100755 --- a/examples/pytorch/language-modeling/run_clm_no_trainer.py +++ b/examples/pytorch/language-modeling/run_clm_no_trainer.py @@ -57,7 +57,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") logger = get_logger(__name__) diff --git a/examples/pytorch/language-modeling/run_mlm.py b/examples/pytorch/language-modeling/run_mlm.py index b635a7aea69881..a1b1b91057f306 100755 --- a/examples/pytorch/language-modeling/run_mlm.py +++ b/examples/pytorch/language-modeling/run_mlm.py @@ -53,7 +53,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") diff --git a/examples/pytorch/language-modeling/run_mlm_no_trainer.py b/examples/pytorch/language-modeling/run_mlm_no_trainer.py index c336a6acc5c19a..9ce3cdd09f83bd 100755 --- a/examples/pytorch/language-modeling/run_mlm_no_trainer.py +++ b/examples/pytorch/language-modeling/run_mlm_no_trainer.py @@ -57,7 +57,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") logger = get_logger(__name__) require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") diff --git a/examples/pytorch/language-modeling/run_plm.py b/examples/pytorch/language-modeling/run_plm.py index 4a885ee49661fd..12430b2a8985fc 100755 --- a/examples/pytorch/language-modeling/run_plm.py +++ b/examples/pytorch/language-modeling/run_plm.py @@ -47,7 +47,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") diff --git a/examples/pytorch/multiple-choice/run_swag.py b/examples/pytorch/multiple-choice/run_swag.py index f9df919e1f92da..c8084e25a22138 100755 --- a/examples/pytorch/multiple-choice/run_swag.py +++ b/examples/pytorch/multiple-choice/run_swag.py @@ -47,7 +47,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") logger = logging.getLogger(__name__) diff --git a/examples/pytorch/multiple-choice/run_swag_no_trainer.py b/examples/pytorch/multiple-choice/run_swag_no_trainer.py index 43dee8bfdb305f..7235d9494e30a0 100755 --- a/examples/pytorch/multiple-choice/run_swag_no_trainer.py +++ b/examples/pytorch/multiple-choice/run_swag_no_trainer.py @@ -56,7 +56,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") logger = get_logger(__name__) # You should update this to your particular problem to have better documentation of `model_type` diff --git a/examples/pytorch/question-answering/run_qa.py b/examples/pytorch/question-answering/run_qa.py index 54db2b7bb12d66..65940cd889fb9c 100755 --- a/examples/pytorch/question-answering/run_qa.py +++ b/examples/pytorch/question-answering/run_qa.py @@ -49,7 +49,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt") diff --git a/examples/pytorch/question-answering/run_qa_beam_search.py b/examples/pytorch/question-answering/run_qa_beam_search.py index ce110ae3646362..a0a1a6c51c4e52 100755 --- a/examples/pytorch/question-answering/run_qa_beam_search.py +++ b/examples/pytorch/question-answering/run_qa_beam_search.py @@ -48,7 +48,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt") diff --git a/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py b/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py index 5ab5a3d1756961..e1a70a980d3cb9 100644 --- a/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py +++ b/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py @@ -56,7 +56,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt") diff --git a/examples/pytorch/question-answering/run_qa_no_trainer.py b/examples/pytorch/question-answering/run_qa_no_trainer.py index f10191fbb5ba13..49d5b752bfb564 100755 --- a/examples/pytorch/question-answering/run_qa_no_trainer.py +++ b/examples/pytorch/question-answering/run_qa_no_trainer.py @@ -57,7 +57,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt") diff --git a/examples/pytorch/question-answering/run_seq2seq_qa.py b/examples/pytorch/question-answering/run_seq2seq_qa.py index 8ffe114dbb8644..078b58dfdf0ec4 100644 --- a/examples/pytorch/question-answering/run_seq2seq_qa.py +++ b/examples/pytorch/question-answering/run_seq2seq_qa.py @@ -45,7 +45,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt") diff --git a/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py b/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py index bc1bfb2c1c0945..c42dc2a41ca9aa 100644 --- a/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py +++ b/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py @@ -51,7 +51,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/semantic-segmentation/requirements.txt") diff --git a/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py b/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py index cd30a30daa8b41..cfc32a93c4c3c0 100644 --- a/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py +++ b/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py @@ -50,7 +50,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") logger = get_logger(__name__) diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py b/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py index 36efb44138d9a6..54ea4e17f4545c 100755 --- a/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py +++ b/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py @@ -50,7 +50,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt") diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py b/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py index 015c1f0a653222..e372bd833aac8d 100755 --- a/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py +++ b/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py @@ -48,7 +48,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt") diff --git a/examples/pytorch/summarization/run_summarization.py b/examples/pytorch/summarization/run_summarization.py index 5d6d5d5c771b3a..e8969d216f945a 100755 --- a/examples/pytorch/summarization/run_summarization.py +++ b/examples/pytorch/summarization/run_summarization.py @@ -52,7 +52,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt") diff --git a/examples/pytorch/summarization/run_summarization_no_trainer.py b/examples/pytorch/summarization/run_summarization_no_trainer.py index ec9f5fb6190aa7..eb809ada9915db 100644 --- a/examples/pytorch/summarization/run_summarization_no_trainer.py +++ b/examples/pytorch/summarization/run_summarization_no_trainer.py @@ -56,7 +56,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") logger = get_logger(__name__) require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt") diff --git a/examples/pytorch/text-classification/run_glue.py b/examples/pytorch/text-classification/run_glue.py index 49af0c85568c9b..f6ed830281e271 100755 --- a/examples/pytorch/text-classification/run_glue.py +++ b/examples/pytorch/text-classification/run_glue.py @@ -48,7 +48,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") diff --git a/examples/pytorch/text-classification/run_glue_no_trainer.py b/examples/pytorch/text-classification/run_glue_no_trainer.py index fadb0148313e33..ff972a73bdf0d6 100644 --- a/examples/pytorch/text-classification/run_glue_no_trainer.py +++ b/examples/pytorch/text-classification/run_glue_no_trainer.py @@ -48,7 +48,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") logger = get_logger(__name__) diff --git a/examples/pytorch/text-classification/run_xnli.py b/examples/pytorch/text-classification/run_xnli.py index d4cfc3a77d0b6d..2b512a2bb1b302 100755 --- a/examples/pytorch/text-classification/run_xnli.py +++ b/examples/pytorch/text-classification/run_xnli.py @@ -48,7 +48,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") diff --git a/examples/pytorch/token-classification/run_ner.py b/examples/pytorch/token-classification/run_ner.py index a272f25aa417ea..6541593c5d0e09 100755 --- a/examples/pytorch/token-classification/run_ner.py +++ b/examples/pytorch/token-classification/run_ner.py @@ -49,7 +49,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt") diff --git a/examples/pytorch/token-classification/run_ner_no_trainer.py b/examples/pytorch/token-classification/run_ner_no_trainer.py index 4aee8c7ebacbfd..b8c3a9eae8a755 100755 --- a/examples/pytorch/token-classification/run_ner_no_trainer.py +++ b/examples/pytorch/token-classification/run_ner_no_trainer.py @@ -55,7 +55,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") logger = get_logger(__name__) require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt") diff --git a/examples/pytorch/translation/run_translation.py b/examples/pytorch/translation/run_translation.py index af1868b25aad35..9df352f63dd997 100755 --- a/examples/pytorch/translation/run_translation.py +++ b/examples/pytorch/translation/run_translation.py @@ -52,7 +52,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/translation/requirements.txt") diff --git a/examples/pytorch/translation/run_translation_no_trainer.py b/examples/pytorch/translation/run_translation_no_trainer.py index 4c7ac38e601a59..8018652eae2c18 100644 --- a/examples/pytorch/translation/run_translation_no_trainer.py +++ b/examples/pytorch/translation/run_translation_no_trainer.py @@ -57,7 +57,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") logger = get_logger(__name__) require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/translation/requirements.txt") diff --git a/examples/tensorflow/multiple-choice/run_swag.py b/examples/tensorflow/multiple-choice/run_swag.py index 2684500d248db9..34d8281ac5a03e 100644 --- a/examples/tensorflow/multiple-choice/run_swag.py +++ b/examples/tensorflow/multiple-choice/run_swag.py @@ -50,7 +50,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") logger = logging.getLogger(__name__) diff --git a/examples/tensorflow/question-answering/run_qa.py b/examples/tensorflow/question-answering/run_qa.py index 7f53a9841509c7..9bba2e54e915ba 100755 --- a/examples/tensorflow/question-answering/run_qa.py +++ b/examples/tensorflow/question-answering/run_qa.py @@ -48,7 +48,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") logger = logging.getLogger(__name__) diff --git a/examples/tensorflow/summarization/run_summarization.py b/examples/tensorflow/summarization/run_summarization.py index 2cf6bdba604b8d..21ad8e4fa02af9 100644 --- a/examples/tensorflow/summarization/run_summarization.py +++ b/examples/tensorflow/summarization/run_summarization.py @@ -53,7 +53,7 @@ # region Checking dependencies # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt") diff --git a/examples/tensorflow/text-classification/run_glue.py b/examples/tensorflow/text-classification/run_glue.py index d5a6b096b3467e..ab91e84a59653a 100644 --- a/examples/tensorflow/text-classification/run_glue.py +++ b/examples/tensorflow/text-classification/run_glue.py @@ -47,7 +47,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") task_to_keys = { "cola": ("sentence", None), diff --git a/examples/tensorflow/translation/run_translation.py b/examples/tensorflow/translation/run_translation.py index 7ccd089ca82dce..6632dbe9105867 100644 --- a/examples/tensorflow/translation/run_translation.py +++ b/examples/tensorflow/translation/run_translation.py @@ -56,7 +56,7 @@ # region Dependencies and constants # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.22.0.dev0") +check_min_version("4.23.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt") diff --git a/setup.py b/setup.py index 799509cc9d2d31..27ab6efd69ef1e 100644 --- a/setup.py +++ b/setup.py @@ -400,7 +400,7 @@ def run(self): setup( name="transformers", - version="4.22.0.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots) + version="4.23.0.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots) author="The Hugging Face team (past and future) with the help of all our contributors (https://github.com/huggingface/transformers/graphs/contributors)", author_email="transformers@huggingface.co", description="State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow", diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index c6fd12595edfa3..3c3a3a50064162 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -22,7 +22,7 @@ # to defer the actual importing for when the objects are requested. This way `import transformers` provides the names # in the namespace without actually importing anything (and especially none of the backends). -__version__ = "4.22.0.dev0" +__version__ = "4.23.0.dev0" from typing import TYPE_CHECKING From abca1741cf065749c44ef5d77f6f632c50beb070 Mon Sep 17 00:00:00 2001 From: Hakjin Lee Date: Thu, 15 Sep 2022 08:21:06 +0900 Subject: [PATCH 300/539] Fix a broken link for deepspeed ZeRO inference in the docs (#19001) * Fix a broken link for deepspeed ZeRO inference * fix link Co-authored-by: Stas Bekman --- docs/source/en/main_classes/deepspeed.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/en/main_classes/deepspeed.mdx b/docs/source/en/main_classes/deepspeed.mdx index a0d6dcc7769e79..8fb819fee59799 100644 --- a/docs/source/en/main_classes/deepspeed.mdx +++ b/docs/source/en/main_classes/deepspeed.mdx @@ -49,7 +49,7 @@ Inference: 1. DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity. It uses the same ZeRO protocol as training, but it doesn't use an optimizer and a lr scheduler and only stage 3 is relevant. For more details see: - [deepspeed-zero-inference](#deepspeed-zero-inference). + [zero-inference](#zero-inference). There is also DeepSpeed Inference - this is a totally different technology which uses Tensor Parallelism instead of ZeRO (coming soon). @@ -81,7 +81,7 @@ pip install transformers[deepspeed] or find more details on [the DeepSpeed's GitHub page](https://github.com/microsoft/deepspeed#installation) and [advanced install](https://www.deepspeed.ai/tutorials/advanced-install/). -If you're still struggling with the build, first make sure to read [zero-install-notes](#zero-install-notes). +If you're still struggling with the build, first make sure to read [CUDA Extension Installation Notes](trainer#cuda-extension-installation-notes). If you don't prebuild the extensions and rely on them to be built at run time and you tried all of the above solutions to no avail, the next thing to try is to pre-build the modules before installing them. From 8edf1963103127247ae3ef96fc5ba6a96eb4a290 Mon Sep 17 00:00:00 2001 From: Stas Bekman Date: Wed, 14 Sep 2022 16:29:58 -0700 Subject: [PATCH 301/539] [doc] debug: fix import (#19042) correct the import statement --- docs/source/en/debugging.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/en/debugging.mdx b/docs/source/en/debugging.mdx index 7339d61a057527..762e2a2a04e5cd 100644 --- a/docs/source/en/debugging.mdx +++ b/docs/source/en/debugging.mdx @@ -77,7 +77,7 @@ to the normal command line arguments, or pass `debug="underflow_overflow"` when If you're using your own training loop or another Trainer you can accomplish the same with: ```python -from .debug_utils import DebugUnderflowOverflow +from transformers.debug_utils import DebugUnderflowOverflow debug_overflow = DebugUnderflowOverflow(model) ``` @@ -271,7 +271,7 @@ Additionally, if you're instantiating the debugger in your own code, you can adj its default, e.g.: ```python -from .debug_utils import DebugUnderflowOverflow +from transformers.debug_utils import DebugUnderflowOverflow debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100) ``` From 7743caccb95acb26a8bcc8806bd697d8a882e786 Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Thu, 15 Sep 2022 13:01:19 +0200 Subject: [PATCH 302/539] [bnb] Small improvements on utils (#18646) * Small replacement - replace `modules_to_not_convert` by `module_to_not_convert` * refactor a bit - changed variables name - now output a list - change error message * make style * add list * make style * change args name Co-authored-by: stas00 * fix comment * fix typo Co-authored-by: stas00 * Update src/transformers/modeling_utils.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: stas00 Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> --- src/transformers/modeling_utils.py | 33 +++++++++++++++++++------- src/transformers/utils/bitsandbytes.py | 21 +++++++++++----- 2 files changed, 39 insertions(+), 15 deletions(-) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index 2f305ff8dd098f..930e97fc324bfe 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -1751,7 +1751,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P https://test.pypi.org/simple/ bitsandbytes-cudaXXX` where XXX is your CUDA version (e.g. 11.6 = 116). Make also sure that you have enough GPU RAM to store half of the model size since the 8bit modules are not compiled and adapted for CPUs. - int8_threshold (`float`, *optional*, defaults to 6): + load_in_8bit_threshold (`float`, *optional*, defaults to 6): Works together with `load_in_8bit`. This corresponds to the outlier threshold for outlier detection as described in `GPT3.int8() : 8-bit Matrix Multiplication for Transformers at Scale` paper. Any hidden states value that is above this threshold will be considered an outlier and the operation on those @@ -1761,6 +1761,9 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P quantization works well for values of magnitude ~5, but beyond that, there is a significant performance penalty. A good default threshold is 6, but a lower threshold might be needed for more unstable models (small models, fine-tuning). + load_in_8bit_skip_modules (`List[str]`, *optional*): + An explicit list of the modules that we do not want to convert in 8-bit. This is useful for models such + as Jukebox that has several heads in different places and not necessarily at the last position. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here. @@ -1852,7 +1855,8 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P offload_folder = kwargs.pop("offload_folder", None) offload_state_dict = kwargs.pop("offload_state_dict", False) load_in_8bit = kwargs.pop("load_in_8bit", False) - int8_threshold = kwargs.pop("int8_threshold", 6.0) + load_in_8bit_threshold = kwargs.pop("load_in_8bit_threshold", 6.0) + load_in_8bit_skip_modules = kwargs.pop("load_in_8bit_skip_modules", None) subfolder = kwargs.pop("subfolder", "") commit_hash = kwargs.pop("_commit_hash", None) @@ -2156,13 +2160,18 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P model = cls(config, *model_args, **model_kwargs) if load_in_8bit: - from .utils.bitsandbytes import get_key_to_not_convert, replace_8bit_linear + from .utils.bitsandbytes import get_keys_to_not_convert, replace_8bit_linear logger.info("Detected 8-bit loading: activating 8-bit loading for this model") - # We never convert lm_head or any last modules for numerical stability reasons - modules_to_not_convert = get_key_to_not_convert(model) - model = replace_8bit_linear(model, threshold=int8_threshold, modules_to_not_convert=modules_to_not_convert) + # We keep some modules such as the lm_head in their original dtype for numerical stability reasons + if load_in_8bit_skip_modules is None: + modules_to_not_convert = get_keys_to_not_convert(model) + else: + modules_to_not_convert = load_in_8bit_skip_modules + model = replace_8bit_linear( + model, threshold=load_in_8bit_threshold, modules_to_not_convert=modules_to_not_convert + ) if isinstance(device_map, str): if model._no_split_modules is None: @@ -2193,12 +2202,18 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P ) if load_in_8bit: - # The LM head can stay on disk / CPU + # The LM head / tied weights or any last module can stay on disk / CPU device_map_without_lm_head = { - key: device_map[key] for key in device_map.keys() if key != modules_to_not_convert + key: device_map[key] for key in device_map.keys() if key not in modules_to_not_convert } if "cpu" in device_map_without_lm_head.values() or "disk" in device_map_without_lm_head.values(): - raise ValueError("8-bit operations on `bitsandbytes` are not supported under CPU!") + raise ValueError( + """ + Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit + the quantized model. If you have set a value for `max_memory` you should increase that. To have + an idea of the modules that are set on the CPU or RAM you can print model.hf_device_map. + """ + ) del device_map_without_lm_head if from_tf: diff --git a/src/transformers/utils/bitsandbytes.py b/src/transformers/utils/bitsandbytes.py index eca605b2edef94..b2339efd626953 100644 --- a/src/transformers/utils/bitsandbytes.py +++ b/src/transformers/utils/bitsandbytes.py @@ -114,7 +114,7 @@ def replace_8bit_linear(model, threshold=6.0, modules_to_not_convert="lm_head"): if len(list(module.children())) > 0: replace_8bit_linear(module, threshold, modules_to_not_convert) - if isinstance(module, nn.Linear) and name != modules_to_not_convert: + if isinstance(module, nn.Linear) and name not in modules_to_not_convert: with init_empty_weights(): model._modules[name] = bnb.nn.Linear8bitLt( module.in_features, @@ -126,10 +126,12 @@ def replace_8bit_linear(model, threshold=6.0, modules_to_not_convert="lm_head"): return model -def get_key_to_not_convert(model): +def get_keys_to_not_convert(model): r""" An utility function to get the key of the module to keep in full precision if any For example for CausalLM modules - we may want to keep the lm_head in full precision for numerical stability reasons. + we may want to keep the lm_head in full precision for numerical stability reasons. For other architectures, we want + to keep the tied weights of the model. The function will return a list of the keys of the modules to not convert in + int8. Parameters: model (`torch.nn.Module`): @@ -139,7 +141,9 @@ def get_key_to_not_convert(model): # check if it contains tied weights tied_model = deepcopy(model) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() - has_tied_params = len(find_tied_parameters(tied_model)) > 0 + + tied_keys = list(find_tied_parameters(tied_model).values()) + has_tied_params = len(tied_keys) > 0 # Check if it is a base model is_base_model = not hasattr(model, model.base_model_prefix) @@ -150,5 +154,10 @@ def get_key_to_not_convert(model): # otherwise they have an attached head list_modules = list(model.named_parameters()) - last_name = list_modules[-1][0] - return last_name.split(".")[0] + list_last_module = [list_modules[-1][0]] + + # add last module together with tied weights + intersection = set(list_last_module) - set(tied_keys) + list_untouched = tied_keys + list(intersection) + + return [module_name.split(".")[0] for module_name in list_untouched] From 30a28f5227d541ae6b0a287ae345dfae687f21da Mon Sep 17 00:00:00 2001 From: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Date: Thu, 15 Sep 2022 12:32:31 +0100 Subject: [PATCH 303/539] Update image segmentation pipeline test (#18731) * Updated test values The image segmentation pipeline tests - tests/pipelines/test_pipelines_image_segmentation.py - were failing after the merging of #1849 (49e44b216b2559e34e945d5dcdbbe2238859e29b). This was due to the difference in rescaling. Previously the images were rescaled by `image = image / 255`. In the new commit, a `rescale` method was added, and images rescaled using `image = image * scale`. This was known to cause small differences in the processed images (see [PR comment](https://github.com/huggingface/transformers/pull/18499#discussion_r940347575)). Testing locally, changing the `rescale` method to divide by a scale factor (255) resulted in the tests passing. It was therefore decided the test values could be updated, as there was no logic difference between the commits. * Use double quotes, like previous example * Fix up --- tests/pipelines/test_pipelines_image_segmentation.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/pipelines/test_pipelines_image_segmentation.py b/tests/pipelines/test_pipelines_image_segmentation.py index 1884682ec535c2..611e9d2ed11d60 100644 --- a/tests/pipelines/test_pipelines_image_segmentation.py +++ b/tests/pipelines/test_pipelines_image_segmentation.py @@ -255,7 +255,7 @@ def test_integration_torch_image_segmentation(self): self.assertEqual( nested_simplify(outputs, decimals=4), [ - {"score": 0.9094, "label": "blanket", "mask": "85144e4bf8d624c2c6175f7faf57eb30"}, + {"score": 0.9094, "label": "blanket", "mask": "6500201749480f87154fd967783b2b97"}, {"score": 0.9941, "label": "cat", "mask": "f3a7f80220788acc0245ebc084df6afc"}, {"score": 0.9987, "label": "remote", "mask": "7703408f54da1d0ebda47841da875e48"}, {"score": 0.9995, "label": "remote", "mask": "bd726918f10fed3efaef0091e11f923b"}, @@ -279,7 +279,7 @@ def test_integration_torch_image_segmentation(self): nested_simplify(outputs, decimals=4), [ [ - {"score": 0.9094, "label": "blanket", "mask": "85144e4bf8d624c2c6175f7faf57eb30"}, + {"score": 0.9094, "label": "blanket", "mask": "6500201749480f87154fd967783b2b97"}, {"score": 0.9941, "label": "cat", "mask": "f3a7f80220788acc0245ebc084df6afc"}, {"score": 0.9987, "label": "remote", "mask": "7703408f54da1d0ebda47841da875e48"}, {"score": 0.9995, "label": "remote", "mask": "bd726918f10fed3efaef0091e11f923b"}, @@ -287,7 +287,7 @@ def test_integration_torch_image_segmentation(self): {"score": 0.9994, "label": "cat", "mask": "fa5d8d5c329546ba5339f3095641ef56"}, ], [ - {"score": 0.9094, "label": "blanket", "mask": "85144e4bf8d624c2c6175f7faf57eb30"}, + {"score": 0.9094, "label": "blanket", "mask": "6500201749480f87154fd967783b2b97"}, {"score": 0.9941, "label": "cat", "mask": "f3a7f80220788acc0245ebc084df6afc"}, {"score": 0.9987, "label": "remote", "mask": "7703408f54da1d0ebda47841da875e48"}, {"score": 0.9995, "label": "remote", "mask": "bd726918f10fed3efaef0091e11f923b"}, From 0a42b61edec47acb8dabb64e5f0e9e97b0746a42 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Thu, 15 Sep 2022 15:21:57 +0200 Subject: [PATCH 304/539] Fix `test_save_load` for `TFViTMAEModelTest` (#19040) * Fix test_save_load for TFViTMAEModelTest Co-authored-by: ydshieh --- tests/models/vit_mae/test_modeling_tf_vit_mae.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/models/vit_mae/test_modeling_tf_vit_mae.py b/tests/models/vit_mae/test_modeling_tf_vit_mae.py index e9db7ea6b2729c..f05ecaf69c7afb 100644 --- a/tests/models/vit_mae/test_modeling_tf_vit_mae.py +++ b/tests/models/vit_mae/test_modeling_tf_vit_mae.py @@ -375,7 +375,6 @@ def test_keras_save_load(self): # overwrite from common since TFViTMAEForPretraining has random masking, we need to fix the noise # to generate masks during test - @slow def test_save_load(self): # make mask reproducible np.random.seed(2) @@ -398,9 +397,8 @@ def test_save_load(self): out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmpdirname: - model.save_pretrained(tmpdirname, saved_model=True) - saved_model_dir = os.path.join(tmpdirname, "saved_model", "1") - model = tf.keras.models.load_model(saved_model_dir) + model.save_pretrained(tmpdirname, saved_model=False) + model = model_class.from_pretrained(tmpdirname) after_outputs = model(model_input, noise=noise) if model_class.__name__ == "TFViTMAEModel": From 9b80a0bc187f121ed778c8b4eb6909797eebac62 Mon Sep 17 00:00:00 2001 From: lewtun Date: Thu, 15 Sep 2022 15:22:31 +0200 Subject: [PATCH 305/539] Pin minimum PyTorch version for BLOOM ONNX export (#19046) --- src/transformers/models/bloom/configuration_bloom.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/transformers/models/bloom/configuration_bloom.py b/src/transformers/models/bloom/configuration_bloom.py index 10acdcbc68e154..4bcc6e2edbcfcc 100644 --- a/src/transformers/models/bloom/configuration_bloom.py +++ b/src/transformers/models/bloom/configuration_bloom.py @@ -16,6 +16,8 @@ from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional +from packaging import version + from transformers import is_torch_available @@ -154,6 +156,9 @@ def __init__( class BloomOnnxConfig(OnnxConfigWithPast): + + torch_onnx_minimum_version = version.parse("1.12") + def __init__( self, config: PretrainedConfig, From 2322eb8e2f9765cb73f59b324cc46a0e9cfe803f Mon Sep 17 00:00:00 2001 From: Matt Date: Thu, 15 Sep 2022 14:34:22 +0100 Subject: [PATCH 306/539] Update serving signatures and make sure we actually use them (#19034) * Override save() to use the serving signature as the default * Replace int32 with int64 in all our serving signatures * Remember one very important line so as not to break every test at once * Dtype fix for TFLED * dtype fix for shift_tokens_right in general * Dtype fixes in mBART and RAG * Fix dtypes for test_unpack_inputs * More dtype fixes * Yet more mBART + RAG dtype fixes * Yet more mBART + RAG dtype fixes * Add a check that the model actually has a serving method --- src/transformers/modeling_tf_utils.py | 41 +++++++++++++++++-- .../models/bart/modeling_tf_bart.py | 16 +++++--- .../models/bert/modeling_tf_bert.py | 6 +-- .../blenderbot/modeling_tf_blenderbot.py | 8 +++- .../modeling_tf_blenderbot_small.py | 8 +++- .../models/clip/modeling_tf_clip.py | 4 +- .../models/convbert/modeling_tf_convbert.py | 6 +-- .../distilbert/modeling_tf_distilbert.py | 4 +- .../models/dpr/modeling_tf_dpr.py | 4 +- .../models/funnel/modeling_tf_funnel.py | 4 +- .../models/gpt2/modeling_tf_gpt2.py | 4 +- .../models/gptj/modeling_tf_gptj.py | 4 +- .../models/hubert/modeling_tf_hubert.py | 4 +- .../layoutlmv3/modeling_tf_layoutlmv3.py | 6 +-- .../models/led/modeling_tf_led.py | 13 ++++-- .../models/lxmert/modeling_tf_lxmert.py | 8 ++-- .../models/marian/modeling_tf_marian.py | 8 +++- .../models/mbart/modeling_tf_mbart.py | 8 +++- .../models/mpnet/modeling_tf_mpnet.py | 4 +- .../models/openai/modeling_tf_openai.py | 4 +- .../models/opt/modeling_tf_opt.py | 4 +- .../models/pegasus/modeling_tf_pegasus.py | 8 +++- .../models/rag/modeling_tf_rag.py | 25 +++++++---- .../models/rembert/modeling_tf_rembert.py | 6 +-- .../models/roberta/modeling_tf_roberta.py | 4 +- .../models/roformer/modeling_tf_roformer.py | 6 +-- .../modeling_tf_speech_to_text.py | 14 ++++--- src/transformers/models/t5/modeling_tf_t5.py | 8 ++-- .../models/tapas/modeling_tf_tapas.py | 4 +- .../transfo_xl/modeling_tf_transfo_xl.py | 2 +- .../models/wav2vec2/modeling_tf_wav2vec2.py | 4 +- .../models/xglm/modeling_tf_xglm.py | 4 +- .../models/xlnet/modeling_tf_xlnet.py | 6 +-- ...tf_{{cookiecutter.lowercase_modelname}}.py | 11 +++-- tests/test_modeling_tf_common.py | 6 +-- 35 files changed, 179 insertions(+), 97 deletions(-) diff --git a/src/transformers/modeling_tf_utils.py b/src/transformers/modeling_tf_utils.py index 160a68c9209dd7..bbcbf125d8d4f1 100644 --- a/src/transformers/modeling_tf_utils.py +++ b/src/transformers/modeling_tf_utils.py @@ -560,6 +560,18 @@ def input_processing(func, config, **kwargs): if "kwargs" in output: del output["kwargs"] + cast_output = dict() + for key, val in output.items(): + if isinstance(val, tf.Tensor) and val.dtype == tf.int32: + cast_output[key] = tf.cast(val, tf.int64) + elif isinstance(val, np.ndarray) and val.dtype == np.int32: + cast_output[key] = val.astype(np.int64) + else: + cast_output[key] = val + + output = cast_output + del cast_output + if config is not None: boolean_dict = { k: v @@ -1054,9 +1066,9 @@ def _from_config(cls, config, **kwargs): @tf.function( input_signature=[ { - "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), - "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), - "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"), + "input_ids": tf.TensorSpec((None, None), tf.int64, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None), tf.int64, name="attention_mask"), + "token_type_ids": tf.TensorSpec((None, None), tf.int64, name="token_type_ids"), } ] ) @@ -1082,6 +1094,29 @@ def serving_output(output): """ raise NotImplementedError + def save( + self, + filepath, + overwrite=True, + include_optimizer=True, + save_format=None, + signatures=None, + options=None, + save_traces=True, + ): + # Very simple wrapper that ensures we set the correct serving signature when saving + if signatures is None and hasattr(self, "serving"): + signatures = self.serving + super().save( + filepath, + overwrite=overwrite, + include_optimizer=include_optimizer, + save_format=save_format, + signatures=signatures, + options=options, + save_traces=save_traces, + ) + def get_input_embeddings(self) -> tf.keras.layers.Layer: """ Returns the model's input embeddings layer. diff --git a/src/transformers/models/bart/modeling_tf_bart.py b/src/transformers/models/bart/modeling_tf_bart.py index 3f448b4d2e163b..f4e9532817cdf6 100644 --- a/src/transformers/models/bart/modeling_tf_bart.py +++ b/src/transformers/models/bart/modeling_tf_bart.py @@ -64,11 +64,15 @@ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int): pad_token_id = tf.cast(pad_token_id, input_ids.dtype) decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype) - start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id) + start_tokens = tf.fill( + (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype) + ) shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) # replace possible -100 values in labels by `pad_token_id` shifted_input_ids = tf.where( - shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids + shifted_input_ids == -100, + tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)), + shifted_input_ids, ) # "Verify that `labels` has only positive values and -100" @@ -475,10 +479,10 @@ def dummy_inputs(self): @tf.function( input_signature=[ { - "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), - "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), - "decoder_input_ids": tf.TensorSpec((None, None), tf.int32, name="decoder_input_ids"), - "decoder_attention_mask": tf.TensorSpec((None, None), tf.int32, name="decoder_attention_mask"), + "input_ids": tf.TensorSpec((None, None), tf.int64, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None), tf.int64, name="attention_mask"), + "decoder_input_ids": tf.TensorSpec((None, None), tf.int64, name="decoder_input_ids"), + "decoder_attention_mask": tf.TensorSpec((None, None), tf.int64, name="decoder_attention_mask"), } ] ) diff --git a/src/transformers/models/bert/modeling_tf_bert.py b/src/transformers/models/bert/modeling_tf_bert.py index 8ab88b9730418d..b1240b47639de3 100644 --- a/src/transformers/models/bert/modeling_tf_bert.py +++ b/src/transformers/models/bert/modeling_tf_bert.py @@ -1799,9 +1799,9 @@ def call( @tf.function( input_signature=[ { - "input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"), - "attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"), - "token_type_ids": tf.TensorSpec((None, None, None), tf.int32, name="token_type_ids"), + "input_ids": tf.TensorSpec((None, None, None), tf.int64, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None, None), tf.int64, name="attention_mask"), + "token_type_ids": tf.TensorSpec((None, None, None), tf.int64, name="token_type_ids"), } ] ) diff --git a/src/transformers/models/blenderbot/modeling_tf_blenderbot.py b/src/transformers/models/blenderbot/modeling_tf_blenderbot.py index 7843e057f3c986..c56b30cf83a25e 100644 --- a/src/transformers/models/blenderbot/modeling_tf_blenderbot.py +++ b/src/transformers/models/blenderbot/modeling_tf_blenderbot.py @@ -66,11 +66,15 @@ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int): pad_token_id = tf.cast(pad_token_id, input_ids.dtype) decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype) - start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id) + start_tokens = tf.fill( + (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype) + ) shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) # replace possible -100 values in labels by `pad_token_id` shifted_input_ids = tf.where( - shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids + shifted_input_ids == -100, + tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)), + shifted_input_ids, ) # "Verify that `labels` has only positive values and -100" diff --git a/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py b/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py index 2f1a94ba9606ba..8383dc097295c7 100644 --- a/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py +++ b/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py @@ -65,11 +65,15 @@ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int): pad_token_id = tf.cast(pad_token_id, input_ids.dtype) decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype) - start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id) + start_tokens = tf.fill( + (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype) + ) shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) # replace possible -100 values in labels by `pad_token_id` shifted_input_ids = tf.where( - shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids + shifted_input_ids == -100, + tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)), + shifted_input_ids, ) # "Verify that `labels` has only positive values and -100" diff --git a/src/transformers/models/clip/modeling_tf_clip.py b/src/transformers/models/clip/modeling_tf_clip.py index 94656a0b39ab51..df490062101792 100644 --- a/src/transformers/models/clip/modeling_tf_clip.py +++ b/src/transformers/models/clip/modeling_tf_clip.py @@ -1097,8 +1097,8 @@ def call( @tf.function( input_signature=[ { - "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), - "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), + "input_ids": tf.TensorSpec((None, None), tf.int64, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None), tf.int64, name="attention_mask"), } ] ) diff --git a/src/transformers/models/convbert/modeling_tf_convbert.py b/src/transformers/models/convbert/modeling_tf_convbert.py index 81e3b0b019c097..f62718af5fe590 100644 --- a/src/transformers/models/convbert/modeling_tf_convbert.py +++ b/src/transformers/models/convbert/modeling_tf_convbert.py @@ -1127,9 +1127,9 @@ def call( @tf.function( input_signature=[ { - "input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"), - "attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"), - "token_type_ids": tf.TensorSpec((None, None, None), tf.int32, name="token_type_ids"), + "input_ids": tf.TensorSpec((None, None, None), tf.int64, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None, None), tf.int64, name="attention_mask"), + "token_type_ids": tf.TensorSpec((None, None, None), tf.int64, name="token_type_ids"), } ] ) diff --git a/src/transformers/models/distilbert/modeling_tf_distilbert.py b/src/transformers/models/distilbert/modeling_tf_distilbert.py index 64c3338b648175..bc1df7f41aeb43 100644 --- a/src/transformers/models/distilbert/modeling_tf_distilbert.py +++ b/src/transformers/models/distilbert/modeling_tf_distilbert.py @@ -424,8 +424,8 @@ class TFDistilBertPreTrainedModel(TFPreTrainedModel): @tf.function( input_signature=[ { - "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), - "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), + "input_ids": tf.TensorSpec((None, None), tf.int64, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None), tf.int64, name="attention_mask"), } ] ) diff --git a/src/transformers/models/dpr/modeling_tf_dpr.py b/src/transformers/models/dpr/modeling_tf_dpr.py index 96ee761b819d08..c166cd04868986 100644 --- a/src/transformers/models/dpr/modeling_tf_dpr.py +++ b/src/transformers/models/dpr/modeling_tf_dpr.py @@ -376,8 +376,8 @@ class TFDPRPretrainedReader(TFPreTrainedModel): @tf.function( input_signature=[ { - "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), - "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), + "input_ids": tf.TensorSpec((None, None), tf.int64, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None), tf.int64, name="attention_mask"), } ] ) diff --git a/src/transformers/models/funnel/modeling_tf_funnel.py b/src/transformers/models/funnel/modeling_tf_funnel.py index 2bf59d0c3dbea3..da71be87d95d18 100644 --- a/src/transformers/models/funnel/modeling_tf_funnel.py +++ b/src/transformers/models/funnel/modeling_tf_funnel.py @@ -1511,9 +1511,9 @@ def call( @tf.function( input_signature=[ { - "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), + "input_ids": tf.TensorSpec((None, None), tf.int64, name="input_ids"), "attention_mask": tf.TensorSpec((None, None), tf.float32, name="attention_mask"), - "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"), + "token_type_ids": tf.TensorSpec((None, None), tf.int64, name="token_type_ids"), } ] ) diff --git a/src/transformers/models/gpt2/modeling_tf_gpt2.py b/src/transformers/models/gpt2/modeling_tf_gpt2.py index a876511fff3db2..5b1e21a3c22c1f 100644 --- a/src/transformers/models/gpt2/modeling_tf_gpt2.py +++ b/src/transformers/models/gpt2/modeling_tf_gpt2.py @@ -548,8 +548,8 @@ def dummy_inputs(self): @tf.function( input_signature=[ { - "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), - "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), + "input_ids": tf.TensorSpec((None, None), tf.int64, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None), tf.int64, name="attention_mask"), } ] ) diff --git a/src/transformers/models/gptj/modeling_tf_gptj.py b/src/transformers/models/gptj/modeling_tf_gptj.py index 943d9b1fff0410..67d17cdc05f80e 100644 --- a/src/transformers/models/gptj/modeling_tf_gptj.py +++ b/src/transformers/models/gptj/modeling_tf_gptj.py @@ -527,8 +527,8 @@ def dummy_inputs(self): @tf.function( input_signature=[ { - "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), - "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), + "input_ids": tf.TensorSpec((None, None), tf.int64, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None), tf.int64, name="attention_mask"), } ] ) diff --git a/src/transformers/models/hubert/modeling_tf_hubert.py b/src/transformers/models/hubert/modeling_tf_hubert.py index e9cf9adb76f674..ec7458a7ebb3e0 100644 --- a/src/transformers/models/hubert/modeling_tf_hubert.py +++ b/src/transformers/models/hubert/modeling_tf_hubert.py @@ -1312,8 +1312,8 @@ def __init__(self, config, *inputs, **kwargs): input_signature=[ { "input_values": tf.TensorSpec((None, None), tf.float32, name="input_values"), - "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), - "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"), + "attention_mask": tf.TensorSpec((None, None), tf.int64, name="attention_mask"), + "token_type_ids": tf.TensorSpec((None, None), tf.int64, name="token_type_ids"), } ] ) diff --git a/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py b/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py index 418ab2b1486097..6bb3eb54e195b5 100644 --- a/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py @@ -988,10 +988,10 @@ def dummy_inputs(self) -> Dict[str, tf.Tensor]: @tf.function( input_signature=[ { - "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), - "bbox": tf.TensorSpec((None, None, 4), tf.int32, name="bbox"), + "input_ids": tf.TensorSpec((None, None), tf.int64, name="input_ids"), + "bbox": tf.TensorSpec((None, None, 4), tf.int64, name="bbox"), "pixel_values": tf.TensorSpec((None, None, None, None), tf.float32, name="pixel_values"), - "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), + "attention_mask": tf.TensorSpec((None, None), tf.int64, name="attention_mask"), } ] ) diff --git a/src/transformers/models/led/modeling_tf_led.py b/src/transformers/models/led/modeling_tf_led.py index a91f769fbf5f43..3702f1cca3bf72 100644 --- a/src/transformers/models/led/modeling_tf_led.py +++ b/src/transformers/models/led/modeling_tf_led.py @@ -55,16 +55,23 @@ LARGE_NEGATIVE = -1e8 +# Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int): - start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id) + pad_token_id = tf.cast(pad_token_id, input_ids.dtype) + decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype) + start_tokens = tf.fill( + (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype) + ) shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) # replace possible -100 values in labels by `pad_token_id` shifted_input_ids = tf.where( - shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids + shifted_input_ids == -100, + tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)), + shifted_input_ids, ) # "Verify that `labels` has only positive values and -100" - assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0)) + assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) # Make sure the assertion op is called by wrapping the result in an identity no-op with tf.control_dependencies([assert_gte0]): diff --git a/src/transformers/models/lxmert/modeling_tf_lxmert.py b/src/transformers/models/lxmert/modeling_tf_lxmert.py index 88535aee12048c..3b44056a6cede5 100644 --- a/src/transformers/models/lxmert/modeling_tf_lxmert.py +++ b/src/transformers/models/lxmert/modeling_tf_lxmert.py @@ -800,12 +800,12 @@ def dummy_inputs(self) -> Dict[str, tf.Tensor]: @tf.function( input_signature=[ { - "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), - "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), + "input_ids": tf.TensorSpec((None, None), tf.int64, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None), tf.int64, name="attention_mask"), "visual_feats": tf.TensorSpec((None, None, None), tf.float32, name="visual_feats"), "visual_pos": tf.TensorSpec((None, None, None), tf.float32, name="visual_pos"), - "visual_attention_mask": tf.TensorSpec((None, None), tf.int32, name="visual_attention_mask"), - "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"), + "visual_attention_mask": tf.TensorSpec((None, None), tf.int64, name="visual_attention_mask"), + "token_type_ids": tf.TensorSpec((None, None), tf.int64, name="token_type_ids"), } ] ) diff --git a/src/transformers/models/marian/modeling_tf_marian.py b/src/transformers/models/marian/modeling_tf_marian.py index 01522346d8fe29..580c38f843d300 100644 --- a/src/transformers/models/marian/modeling_tf_marian.py +++ b/src/transformers/models/marian/modeling_tf_marian.py @@ -65,11 +65,15 @@ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int): pad_token_id = tf.cast(pad_token_id, input_ids.dtype) decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype) - start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id) + start_tokens = tf.fill( + (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype) + ) shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) # replace possible -100 values in labels by `pad_token_id` shifted_input_ids = tf.where( - shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids + shifted_input_ids == -100, + tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)), + shifted_input_ids, ) # "Verify that `labels` has only positive values and -100" diff --git a/src/transformers/models/mbart/modeling_tf_mbart.py b/src/transformers/models/mbart/modeling_tf_mbart.py index 0b7a81aa33c18b..ec034a5fa8e470 100644 --- a/src/transformers/models/mbart/modeling_tf_mbart.py +++ b/src/transformers/models/mbart/modeling_tf_mbart.py @@ -69,11 +69,15 @@ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int): if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` - input_ids = tf.where(input_ids == -100, tf.fill(shape_list(input_ids), pad_token_id), input_ids) + input_ids = tf.where( + input_ids == -100, tf.fill(shape_list(input_ids), tf.cast(pad_token_id, input_ids.dtype)), input_ids + ) language_id_index = ( tf.reduce_sum(tf.cast(tf.math.not_equal(input_ids, pad_token_id), dtype=input_ids.dtype), axis=-1) - 1 ) - language_id_index = tf.stack([tf.range(shape_list(input_ids)[0]), language_id_index], axis=-1) + language_id_index = tf.stack( + [tf.range(shape_list(input_ids)[0], dtype=input_ids.dtype), language_id_index], axis=-1 + ) languages_ids = tf.gather_nd(input_ids, language_id_index) shifted_input_ids = tf.concat([tf.expand_dims(languages_ids, axis=-1), input_ids[:, :-1]], axis=-1) diff --git a/src/transformers/models/mpnet/modeling_tf_mpnet.py b/src/transformers/models/mpnet/modeling_tf_mpnet.py index 7b63dff489043c..3fc43184617063 100644 --- a/src/transformers/models/mpnet/modeling_tf_mpnet.py +++ b/src/transformers/models/mpnet/modeling_tf_mpnet.py @@ -76,8 +76,8 @@ class TFMPNetPreTrainedModel(TFPreTrainedModel): @tf.function( input_signature=[ { - "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), - "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), + "input_ids": tf.TensorSpec((None, None), tf.int64, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None), tf.int64, name="attention_mask"), } ] ) diff --git a/src/transformers/models/openai/modeling_tf_openai.py b/src/transformers/models/openai/modeling_tf_openai.py index e1bb1a5f6f6c1e..357866228b83a0 100644 --- a/src/transformers/models/openai/modeling_tf_openai.py +++ b/src/transformers/models/openai/modeling_tf_openai.py @@ -359,8 +359,8 @@ class TFOpenAIGPTPreTrainedModel(TFPreTrainedModel): @tf.function( input_signature=[ { - "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), - "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), + "input_ids": tf.TensorSpec((None, None), tf.int64, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None), tf.int64, name="attention_mask"), } ] ) diff --git a/src/transformers/models/opt/modeling_tf_opt.py b/src/transformers/models/opt/modeling_tf_opt.py index 8be1a8f091a29c..b523ecceb1078d 100644 --- a/src/transformers/models/opt/modeling_tf_opt.py +++ b/src/transformers/models/opt/modeling_tf_opt.py @@ -425,8 +425,8 @@ def dummy_inputs(self): @tf.function( input_signature=[ { - "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), - "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), + "input_ids": tf.TensorSpec((None, None), tf.int64, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None), tf.int64, name="attention_mask"), } ] ) diff --git a/src/transformers/models/pegasus/modeling_tf_pegasus.py b/src/transformers/models/pegasus/modeling_tf_pegasus.py index 63017efb0353a4..97efed9285fc06 100644 --- a/src/transformers/models/pegasus/modeling_tf_pegasus.py +++ b/src/transformers/models/pegasus/modeling_tf_pegasus.py @@ -65,11 +65,15 @@ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int): pad_token_id = tf.cast(pad_token_id, input_ids.dtype) decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype) - start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id) + start_tokens = tf.fill( + (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype) + ) shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) # replace possible -100 values in labels by `pad_token_id` shifted_input_ids = tf.where( - shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids + shifted_input_ids == -100, + tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)), + shifted_input_ids, ) # "Verify that `labels` has only positive values and -100" diff --git a/src/transformers/models/rag/modeling_tf_rag.py b/src/transformers/models/rag/modeling_tf_rag.py index a31b2d45217e8d..08a9adf591aae2 100644 --- a/src/transformers/models/rag/modeling_tf_rag.py +++ b/src/transformers/models/rag/modeling_tf_rag.py @@ -1301,17 +1301,18 @@ def shift_tokens_right(self, input_ids, start_token_id=None): pad_token_id = self.generator.config.pad_token_id assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined." - shifted_input_ids = tf.cast(input_ids, tf.int32) - start_tokens = tf.fill((shape_list(shifted_input_ids)[0], 1), start_token_id) - shifted_input_ids = tf.concat([start_tokens, shifted_input_ids[:, :-1]], -1) + start_tokens = tf.fill((shape_list(input_ids)[0], 1), tf.cast(start_token_id, input_ids.dtype)) + shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) # replace possible -100 values in labels by `pad_token_id` shifted_input_ids = tf.where( - shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids + shifted_input_ids == -100, + tf.fill(shape_list(shifted_input_ids), tf.cast(pad_token_id, input_ids.dtype)), + shifted_input_ids, ) # "Verify that `labels` has only positive values and -100" - assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.cast(0, tf.int32)) + assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.cast(0, shifted_input_ids.dtype)) # Make sure the assertion op is called by wrapping the result in an identity no-op with tf.control_dependencies([assert_gte0]): @@ -1324,7 +1325,10 @@ def get_nll(self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0 n_docs = n_docs if n_docs is not None else self.config.n_docs # shift tokens left (from original Pytorch's version) - target = tf.concat([target[:, 1:], tf.fill([target.shape[0], 1], self.config.generator.pad_token_id)], axis=1) + target = tf.concat( + [target[:, 1:], tf.fill([target.shape[0], 1], tf.cast(self.config.generator.pad_token_id, target.dtype))], + axis=1, + ) rag_logprobs = self.marginalize(seq_logits, doc_scores, n_docs) loss = self.hf_compute_loss(target, rag_logprobs, from_logits=True, reduce_loss=reduce_loss) @@ -1571,7 +1575,10 @@ def get_nll( self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, exclude_bos_score=False, n_docs=None ): # shift tokens left - target = tf.concat([target[:, 1:], tf.fill([target.shape[0], 1], self.config.generator.pad_token_id)], axis=1) + target = tf.concat( + [target[:, 1:], tf.fill([target.shape[0], 1], tf.cast(self.config.generator.pad_token_id, target.dtype))], + axis=1, + ) # bos_token_id is None for T5 bos_token_id = self.config.bos_token_id or self.config.generator.bos_token_id @@ -1580,7 +1587,7 @@ def get_nll( use_bos = bos_token_id is not None and equal_bos_token_id_all def _mask_pads(ll, smooth_obj): - pad_mask = tf.equal(target, self.config.generator.pad_token_id) + pad_mask = tf.equal(target, tf.cast(self.config.generator.pad_token_id, target.dtype)) if tf.reduce_any(pad_mask): ll = tf.where(pad_mask, 0.0, ll) smooth_obj = tf.where(pad_mask, 0.0, smooth_obj) @@ -1611,7 +1618,7 @@ def _mask_pads(ll, smooth_obj): def torch_gather(param, id_tensor): # 2d-gather torch equivalent: https://stackoverflow.com/questions/52129909/tensorflow-equivalent-of-torch-gather def gather2d(target, id_tensor): - idx = tf.stack([tf.range(tf.shape(id_tensor)[0]), id_tensor[:, 0]], axis=-1) + idx = tf.stack([tf.range(tf.shape(id_tensor)[0], dtype=id_tensor.dtype), id_tensor[:, 0]], axis=-1) result = tf.gather_nd(target, idx) return tf.expand_dims(result, axis=-1) diff --git a/src/transformers/models/rembert/modeling_tf_rembert.py b/src/transformers/models/rembert/modeling_tf_rembert.py index aea9fa325b9930..be1946f1118be7 100644 --- a/src/transformers/models/rembert/modeling_tf_rembert.py +++ b/src/transformers/models/rembert/modeling_tf_rembert.py @@ -1435,9 +1435,9 @@ def call( @tf.function( input_signature=[ { - "input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"), - "attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"), - "token_type_ids": tf.TensorSpec((None, None, None), tf.int32, name="token_type_ids"), + "input_ids": tf.TensorSpec((None, None, None), tf.int64, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None, None), tf.int64, name="attention_mask"), + "token_type_ids": tf.TensorSpec((None, None, None), tf.int64, name="token_type_ids"), } ] ) diff --git a/src/transformers/models/roberta/modeling_tf_roberta.py b/src/transformers/models/roberta/modeling_tf_roberta.py index c2e1477c7ee265..04f4283b2070dc 100644 --- a/src/transformers/models/roberta/modeling_tf_roberta.py +++ b/src/transformers/models/roberta/modeling_tf_roberta.py @@ -798,8 +798,8 @@ def dummy_inputs(self): @tf.function( input_signature=[ { - "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), - "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), + "input_ids": tf.TensorSpec((None, None), tf.int64, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None), tf.int64, name="attention_mask"), } ] ) diff --git a/src/transformers/models/roformer/modeling_tf_roformer.py b/src/transformers/models/roformer/modeling_tf_roformer.py index 852b1424b406bb..4c526b694133cb 100644 --- a/src/transformers/models/roformer/modeling_tf_roformer.py +++ b/src/transformers/models/roformer/modeling_tf_roformer.py @@ -1211,9 +1211,9 @@ def call( @tf.function( input_signature=[ { - "input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"), - "attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"), - "token_type_ids": tf.TensorSpec((None, None, None), tf.int32, name="token_type_ids"), + "input_ids": tf.TensorSpec((None, None, None), tf.int64, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None, None), tf.int64, name="attention_mask"), + "token_type_ids": tf.TensorSpec((None, None, None), tf.int64, name="token_type_ids"), } ] ) diff --git a/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py b/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py index ccae37b3eb5378..e6e1b0facc0116 100755 --- a/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py +++ b/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py @@ -67,11 +67,15 @@ def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int): pad_token_id = tf.cast(pad_token_id, input_ids.dtype) decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype) - start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id) + start_tokens = tf.fill( + (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype) + ) shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) # replace possible -100 values in labels by `pad_token_id` shifted_input_ids = tf.where( - shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids + shifted_input_ids == -100, + tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)), + shifted_input_ids, ) # "Verify that `labels` has only positive values and -100" @@ -591,9 +595,9 @@ def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor): input_signature=[ { "input_features": tf.TensorSpec((None, None, None), tf.float32, name="input_features"), - "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), - "decoder_input_ids": tf.TensorSpec((None, None), tf.int32, name="decoder_input_ids"), - "decoder_attention_mask": tf.TensorSpec((None, None), tf.int32, name="decoder_attention_mask"), + "attention_mask": tf.TensorSpec((None, None), tf.int64, name="attention_mask"), + "decoder_input_ids": tf.TensorSpec((None, None), tf.int64, name="decoder_input_ids"), + "decoder_attention_mask": tf.TensorSpec((None, None), tf.int64, name="decoder_attention_mask"), } ] ) diff --git a/src/transformers/models/t5/modeling_tf_t5.py b/src/transformers/models/t5/modeling_tf_t5.py index b8a9e86ac99229..ae39ea26150dba 100644 --- a/src/transformers/models/t5/modeling_tf_t5.py +++ b/src/transformers/models/t5/modeling_tf_t5.py @@ -872,10 +872,10 @@ def dummy_inputs(self): @tf.function( input_signature=[ { - "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), - "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), - "decoder_input_ids": tf.TensorSpec((None, None), tf.int32, name="decoder_input_ids"), - "decoder_attention_mask": tf.TensorSpec((None, None), tf.int32, name="decoder_attention_mask"), + "input_ids": tf.TensorSpec((None, None), tf.int64, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None), tf.int64, name="attention_mask"), + "decoder_input_ids": tf.TensorSpec((None, None), tf.int64, name="decoder_input_ids"), + "decoder_attention_mask": tf.TensorSpec((None, None), tf.int64, name="decoder_attention_mask"), } ] ) diff --git a/src/transformers/models/tapas/modeling_tf_tapas.py b/src/transformers/models/tapas/modeling_tf_tapas.py index ea379a039d5a7e..48c26a138d6408 100644 --- a/src/transformers/models/tapas/modeling_tf_tapas.py +++ b/src/transformers/models/tapas/modeling_tf_tapas.py @@ -865,9 +865,9 @@ class TFTapasPreTrainedModel(TFPreTrainedModel): @tf.function( input_signature=[ { - "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), + "input_ids": tf.TensorSpec((None, None), tf.int64, name="input_ids"), "attention_mask": tf.TensorSpec((None, None), tf.float32, name="attention_mask"), - "token_type_ids": tf.TensorSpec((None, None, None), tf.int32, name="token_type_ids"), + "token_type_ids": tf.TensorSpec((None, None, None), tf.int64, name="token_type_ids"), } ] ) diff --git a/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py b/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py index b0d26e6edf5191..53370c1d3e7cfc 100644 --- a/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py +++ b/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py @@ -686,7 +686,7 @@ class TFTransfoXLPreTrainedModel(TFPreTrainedModel): @tf.function( input_signature=[ { - "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), + "input_ids": tf.TensorSpec((None, None), tf.int64, name="input_ids"), } ] ) diff --git a/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py b/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py index 58110b51207d16..6fee92e672d66b 100644 --- a/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py +++ b/src/transformers/models/wav2vec2/modeling_tf_wav2vec2.py @@ -1345,8 +1345,8 @@ def __init__(self, config, *inputs, **kwargs): input_signature=[ { "input_values": tf.TensorSpec((None, None), tf.float32, name="input_values"), - "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), - "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"), + "attention_mask": tf.TensorSpec((None, None), tf.int64, name="attention_mask"), + "token_type_ids": tf.TensorSpec((None, None), tf.int64, name="token_type_ids"), } ] ) diff --git a/src/transformers/models/xglm/modeling_tf_xglm.py b/src/transformers/models/xglm/modeling_tf_xglm.py index ac11e7ae7c68c9..6dd62d270b8e23 100644 --- a/src/transformers/models/xglm/modeling_tf_xglm.py +++ b/src/transformers/models/xglm/modeling_tf_xglm.py @@ -636,8 +636,8 @@ def dummy_inputs(self): @tf.function( input_signature=[ { - "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), - "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), + "input_ids": tf.TensorSpec((None, None), tf.int64, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None), tf.int64, name="attention_mask"), } ] ) diff --git a/src/transformers/models/xlnet/modeling_tf_xlnet.py b/src/transformers/models/xlnet/modeling_tf_xlnet.py index 739ad50ecb644f..1b079d00be2b43 100644 --- a/src/transformers/models/xlnet/modeling_tf_xlnet.py +++ b/src/transformers/models/xlnet/modeling_tf_xlnet.py @@ -1563,9 +1563,9 @@ def call( @tf.function( input_signature=[ { - "input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"), - "attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"), - "token_type_ids": tf.TensorSpec((None, None, None), tf.int32, name="token_type_ids"), + "input_ids": tf.TensorSpec((None, None, None), tf.int64, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None, None), tf.int64, name="attention_mask"), + "token_type_ids": tf.TensorSpec((None, None, None), tf.int64, name="token_type_ids"), } ] ) diff --git a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py index 0d025ca98c50d4..8225105ddf134f 100644 --- a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py +++ b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py @@ -1685,16 +1685,21 @@ def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAn LARGE_NEGATIVE = -1e8 +# Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int): - start_tokens = tf.fill((shape_list(input_ids)[0], 1), decoder_start_token_id) + pad_token_id = tf.cast(pad_token_id, input_ids.dtype) + decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype) + start_tokens = tf.fill((shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype)) shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) # replace possible -100 values in labels by `pad_token_id` shifted_input_ids = tf.where( - shifted_input_ids == -100, tf.fill(shape_list(shifted_input_ids), pad_token_id), shifted_input_ids + shifted_input_ids == -100, + tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)), + shifted_input_ids, ) # "Verify that `labels` has only positive values and -100" - assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0)) + assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=shifted_input_ids.dtype)) # Make sure the assertion op is called by wrapping the result in an identity no-op with tf.control_dependencies([assert_gte0]): diff --git a/tests/test_modeling_tf_common.py b/tests/test_modeling_tf_common.py index ca8840d2aafc15..0c55b4d8ed31bf 100644 --- a/tests/test_modeling_tf_common.py +++ b/tests/test_modeling_tf_common.py @@ -1887,9 +1887,9 @@ def foo(self, pixel_values, output_attentions=None, output_hidden_states=None, r return pixel_values, output_attentions, output_hidden_states, return_dict dummy_model = DummyModel() - input_ids = tf.constant([0, 1, 2, 3]) - past = tf.constant([4, 5, 6, 7]) - pixel_values = tf.constant([8, 9, 10, 11]) + input_ids = tf.constant([0, 1, 2, 3], dtype=tf.int64) + past = tf.constant([4, 5, 6, 7], dtype=tf.int64) + pixel_values = tf.constant([8, 9, 10, 11], dtype=tf.int64) # test case 1: Pass inputs as keyword arguments; Booleans are inherited from the config. output = dummy_model.call(input_ids=input_ids, past=past) From 2700ba66d9bbf83218c9e4de5effdadf36b48842 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Thu, 15 Sep 2022 09:39:59 -0400 Subject: [PATCH 307/539] Move cache: expand error message (#19051) --- src/transformers/utils/hub.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py index 8bdf360b029cda..ed2271119e522f 100644 --- a/src/transformers/utils/hub.py +++ b/src/transformers/utils/hub.py @@ -1128,9 +1128,9 @@ def move_cache(cache_dir=None, new_cache_dir=None, token=None): except Exception as e: trace = "\n".join(traceback.format_tb(e.__traceback__)) logger.error( - f"There was a problem when trying to move your cache:\n\n{trace}\n\nPlease file an issue at " - "https://github.com/huggingface/transformers/issues/new/choose and copy paste this whole message and we " - "will do our best to help." + f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease " + "file an issue at https://github.com/huggingface/transformers/issues/new/choose and copy paste this whole " + "message and we will do our best to help." ) try: From 578e18e0028c26d8988413ef270da1b292e87dc2 Mon Sep 17 00:00:00 2001 From: Ekagra Ranjan Date: Thu, 15 Sep 2022 19:20:11 +0530 Subject: [PATCH 308/539] =?UTF-8?q?=F0=9F=9A=A8=F0=9F=9A=A8=F0=9F=9A=A8=20?= =?UTF-8?q?Optimize=20Top=20P=20Sampler=20and=20fix=20edge=20case=20(#1898?= =?UTF-8?q?4)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * init PR * optimize top p and add edge case * styling * style * revert tf and flax test * add edge case test for FLAX and TF * update doc with smallest set sampling for top p * make style --- .../generation_flax_logits_process.py | 4 ++-- src/transformers/generation_logits_process.py | 15 ++++++--------- src/transformers/generation_tf_logits_process.py | 4 ++-- src/transformers/generation_utils.py | 4 ++-- .../test_generation_flax_logits_process.py | 4 ++-- .../generation/test_generation_logits_process.py | 4 ++-- .../test_generation_tf_logits_process.py | 7 +++++-- 7 files changed, 21 insertions(+), 21 deletions(-) diff --git a/src/transformers/generation_flax_logits_process.py b/src/transformers/generation_flax_logits_process.py index b41da1b9b2f450..6e8b4b63432193 100644 --- a/src/transformers/generation_flax_logits_process.py +++ b/src/transformers/generation_flax_logits_process.py @@ -118,8 +118,8 @@ class FlaxTopPLogitsWarper(FlaxLogitsWarper): Args: top_p (`float`): - If set to < 1, only the most probable tokens with probabilities that add up to `top_p` or higher are kept - for generation. + If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or + higher are kept for generation. filter_value (`float`, *optional*, defaults to `-float("Inf")`): All filtered values will be set to this float value. min_tokens_to_keep (`int`, *optional*, defaults to 1): diff --git a/src/transformers/generation_logits_process.py b/src/transformers/generation_logits_process.py index 638815dced16b3..35ca6c57311d63 100644 --- a/src/transformers/generation_logits_process.py +++ b/src/transformers/generation_logits_process.py @@ -173,8 +173,8 @@ class TopPLogitsWarper(LogitsWarper): Args: top_p (`float`): - If set to < 1, only the most probable tokens with probabilities that add up to `top_p` or higher are kept - for generation. + If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or + higher are kept for generation. filter_value (`float`, *optional*, defaults to `-float("Inf")`): All filtered values will be set to this float value. min_tokens_to_keep (`int`, *optional*, defaults to 1): @@ -191,17 +191,14 @@ def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens self.min_tokens_to_keep = min_tokens_to_keep def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: - sorted_logits, sorted_indices = torch.sort(scores, descending=True) + sorted_logits, sorted_indices = torch.sort(scores, descending=False) cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1) # Remove tokens with cumulative top_p above the threshold (token with 0 are kept) - sorted_indices_to_remove = cumulative_probs > self.top_p + sorted_indices_to_remove = cumulative_probs <= (1 - self.top_p) if self.min_tokens_to_keep > 1: - # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below) - sorted_indices_to_remove[..., : self.min_tokens_to_keep - 1] = 0 - # Shift the indices to the right to keep also the first token above the threshold - sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() - sorted_indices_to_remove[..., 0] = 0 + # Keep at least min_tokens_to_keep + sorted_indices_to_remove[..., -self.min_tokens_to_keep :] = 0 # scatter sorted tensors to original indexing indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove) diff --git a/src/transformers/generation_tf_logits_process.py b/src/transformers/generation_tf_logits_process.py index f17ed04686860b..b09330e10b34d4 100644 --- a/src/transformers/generation_tf_logits_process.py +++ b/src/transformers/generation_tf_logits_process.py @@ -150,8 +150,8 @@ class TFTopPLogitsWarper(TFLogitsWarper): Args: top_p (`float`): - If set to < 1, only the most probable tokens with probabilities that add up to `top_p` or higher are kept - for generation. + If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or + higher are kept for generation. filter_value (`float`, *optional*, defaults to `-float("Inf")`): All filtered values will be set to this float value. min_tokens_to_keep (`int`, *optional*, defaults to 1): diff --git a/src/transformers/generation_utils.py b/src/transformers/generation_utils.py index b0e7b1411626e9..10f15304f48c4f 100644 --- a/src/transformers/generation_utils.py +++ b/src/transformers/generation_utils.py @@ -990,8 +990,8 @@ def generate( top_k (`int`, *optional*, defaults to `model.config.top_k` or 50 if the config does not set any value): The number of highest probability vocabulary tokens to keep for top-k-filtering. top_p (`float`, *optional*, defaults to `model.config.top_p` or 1.0 if the config does not set any value): - If set to float < 1, only the most probable tokens with probabilities that add up to `top_p` or higher - are kept for generation. + If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to + `top_p` or higher are kept for generation. typical_p (`float`, *optional*, defaults to `model.config.typical_p` or 1.0 if the config does not set any value): The amount of probability mass from the original distribution to be considered in typical decoding. If set to 1.0 it takes no effect. See [this paper](https://arxiv.org/pdf/2202.00666.pdf) for more details. diff --git a/tests/generation/test_generation_flax_logits_process.py b/tests/generation/test_generation_flax_logits_process.py index aea44252d90f57..3a04f7e38f3d74 100644 --- a/tests/generation/test_generation_flax_logits_process.py +++ b/tests/generation/test_generation_flax_logits_process.py @@ -110,10 +110,10 @@ def test_top_p_dist_warper(self): # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]])) - top_p_warp = FlaxTopPLogitsWarper(0.7) + top_p_warp = FlaxTopPLogitsWarper(0.8) filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None)) - # dist should be filtered to keep min num values so that sum is >= 0.7 + # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]]) self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3)) diff --git a/tests/generation/test_generation_logits_process.py b/tests/generation/test_generation_logits_process.py index 7a515d3e927214..396fccd009bc6f 100644 --- a/tests/generation/test_generation_logits_process.py +++ b/tests/generation/test_generation_logits_process.py @@ -169,10 +169,10 @@ def test_top_p_dist_warper(self): torch.tensor([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]], device=torch_device, dtype=torch.float) ) - top_p_warp = TopPLogitsWarper(0.7) + top_p_warp = TopPLogitsWarper(0.8) filtered_dist = torch.exp(top_p_warp(input_ids, dist)) - # dist should be filtered to keep min num values so that sum is >= 0.7 + # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 EXPECTED_FILTERED_DIST = torch.tensor( [[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]], device=torch_device, dtype=torch.float diff --git a/tests/generation/test_generation_tf_logits_process.py b/tests/generation/test_generation_tf_logits_process.py index be60335ef2f845..676a392204d0cc 100644 --- a/tests/generation/test_generation_tf_logits_process.py +++ b/tests/generation/test_generation_tf_logits_process.py @@ -189,12 +189,15 @@ def test_top_p_dist_warper(self, use_xla): # create distribution and take log (inverse to Softmax as taken in TFTopPLogitsWarper) dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]], dtype=np.float32)) - top_p_warp = TFTopPLogitsWarper(0.7) + # top_p should have been 0.8 to test the edge case of top_p being exactly equal to sum of some token prob + # However, due to the numerical instability of softmax in TF we choose this as the edge case + # top_p as 0.8 passes when use_xla is True and fails when False. Refer PR #18984. + top_p_warp = TFTopPLogitsWarper(0.79999995) if use_xla: top_p_warp = tf.function(top_p_warp, jit_compile=True) filtered_dist = tf.exp(top_p_warp(input_ids, dist, cur_len)) - # dist should be filtered to keep min num values so that sum is >= 0.7 + # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 EXPECTED_FILTERED_DIST = tf.constant([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]], dtype=tf.float32) tf.debugging.assert_near(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3) From 68bb33d7704335b59ebacf35ff570c127d77ff20 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Thu, 15 Sep 2022 17:12:58 +0200 Subject: [PATCH 309/539] Fixing OPT fast tokenizer option. (#18753) * Fixing OPT fast tokenizer option. * Remove dependency on `pt`. * Move it to GPT2 tokenization tests. * Added a few tests. --- src/transformers/convert_slow_tokenizer.py | 16 +++++- .../models/gpt2/tokenization_gpt2_fast.py | 11 +--- tests/models/gpt2/test_tokenization_gpt2.py | 56 ++++++++++++++++++- 3 files changed, 70 insertions(+), 13 deletions(-) diff --git a/src/transformers/convert_slow_tokenizer.py b/src/transformers/convert_slow_tokenizer.py index 427ce3516591c3..6fbd7b49b066a0 100644 --- a/src/transformers/convert_slow_tokenizer.py +++ b/src/transformers/convert_slow_tokenizer.py @@ -282,8 +282,20 @@ def converted(self) -> Tokenizer: tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=self.original_tokenizer.add_prefix_space) tokenizer.decoder = decoders.ByteLevel() - tokenizer.post_processor = processors.ByteLevel(trim_offsets=False) - + if self.original_tokenizer.add_bos_token: + bos = self.original_tokenizer.bos_token + bos_token_id = self.original_tokenizer.bos_token_id + tokenizer.post_processor = processors.TemplateProcessing( + single=f"{bos}:0 $A:0", # token_type_id is 2 for Funnel transformer + pair=f"{bos}:0 $A:0 $B:1", + special_tokens=[ + (bos, bos_token_id), + ], + ) + else: + # XXX trim_offsets=False actually means this post_processor doesn't + # really do anything. + tokenizer.post_processor = processors.ByteLevel(trim_offsets=False) return tokenizer diff --git a/src/transformers/models/gpt2/tokenization_gpt2_fast.py b/src/transformers/models/gpt2/tokenization_gpt2_fast.py index ddd4ad56fde18a..eefd35aa94a7a9 100644 --- a/src/transformers/models/gpt2/tokenization_gpt2_fast.py +++ b/src/transformers/models/gpt2/tokenization_gpt2_fast.py @@ -146,16 +146,7 @@ def __init__( **kwargs, ) - if kwargs.pop("add_bos_token", False): - model_id = kwargs.pop("name_or_path", "") - raise ValueError( - "Currenty GPT2's fast tokenizer does NOT support adding a BOS token." - "Instead you should use GPT2's slow tokenizer class `GPT2Tokenizer` as follows: \n" - f"`GPT2Tokenizer.from_pretrained('{model_id}')`\nor\n" - f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n" - "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005." - " so that the fast tokenizer works correctly." - ) + self.add_bos_token = kwargs.pop("add_bos_token", False) pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space: diff --git a/tests/models/gpt2/test_tokenization_gpt2.py b/tests/models/gpt2/test_tokenization_gpt2.py index 3b2272d8548bc7..3273fbfce77378 100644 --- a/tests/models/gpt2/test_tokenization_gpt2.py +++ b/tests/models/gpt2/test_tokenization_gpt2.py @@ -18,7 +18,7 @@ import os import unittest -from transformers import GPT2Tokenizer, GPT2TokenizerFast +from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers @@ -275,3 +275,57 @@ def test_special_tokens_mask_input_pairs_and_bos_token(self): ] filtered_sequence = [x for x in filtered_sequence if x is not None] self.assertEqual(encoded_sequence, filtered_sequence) + + +@require_tokenizers +class OPTTokenizationTest(unittest.TestCase): + def test_serialize_deserialize_fast_opt(self): + # More context: + # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 + # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 + # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 + + tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True) + text = "A photo of a cat" + + tokens_ids = tokenizer.encode( + text, + ) + self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758]) + tokenizer.save_pretrained("test_opt") + + tokenizer = AutoTokenizer.from_pretrained("./test_opt") + tokens_ids = tokenizer.encode( + text, + ) + self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758]) + + def test_fast_slow_equivalence(self): + tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", use_slow=True) + text = "A photo of a cat" + + tokens_ids = tokenizer.encode( + text, + ) + # Same as above + self.assertEqual(tokens_ids, [2, 250, 1345, 9, 10, 4758]) + + def test_users_can_modify_bos(self): + tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m", from_slow=True) + + tokenizer.bos_token = "bos" + tokenizer.bos_token_id = tokenizer.get_vocab()["bos"] + + text = "A photo of a cat" + tokens_ids = tokenizer.encode( + text, + ) + # We changed the bos token + self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758]) + tokenizer.save_pretrained("./tok") + tokenizer = AutoTokenizer.from_pretrained("./tok") + self.assertTrue(tokenizer.is_fast) + tokens_ids = tokenizer.encode( + text, + ) + self.assertEqual(tokens_ids, [31957, 250, 1345, 9, 10, 4758]) From f7ce4f1ff789c11f129597a1171b5d549d102e09 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Thu, 15 Sep 2022 11:31:09 -0400 Subject: [PATCH 310/539] Fix custom tokenizers test (#19052) * Fix CI for custom tokenizers * Add nightly tests * Run CI, run! * Fix paths * Typos * Fix test --- .circleci/config.yml | 74 ++++++++++++++++++++++++++++--- setup.py | 1 + tests/test_tokenization_common.py | 5 ++- 3 files changed, 74 insertions(+), 6 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 556a97479cef6a..7ff545b2a39a0c 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -578,19 +578,45 @@ jobs: key: v0.5-custom_tokenizers-{{ checksum "setup.py" }} paths: - '~/.cache/pip' + - run: python utils/tests_fetcher.py | tee test_preparation.txt + - store_artifacts: + path: ~/transformers/test_preparation.txt - run: | if [ -f test_list.txt ]; then - python -m pytest --max-worker-restart=0 -s --make-reports=tests_custom_tokenizers ./tests/test_tokenization_bert_japanese.py ./tests/test_tokenization_openai.py | tee tests_output.txt - fi - - run: | - if [ -f test_list.txt ]; then - python -m pytest -n 1 --max-worker-restart=0 tests/test_tokenization_clip.py --dist=loadfile -s --make-reports=tests_tokenization_clip --durations=100 | tee tests_output.txt + python -m pytest --max-worker-restart=0 -s --make-reports=tests_custom_tokenizers ./tests/models/bert_japanese/test_tokenization_bert_japanese.py ./tests/models/openai/test_tokenization_openai.py ./tests/models/clip/test_tokenization_clip.py | tee tests_output.txt fi - store_artifacts: path: ~/transformers/tests_output.txt - store_artifacts: path: ~/transformers/reports + run_tests_custom_tokenizers_all: + working_directory: ~/transformers + docker: + - image: cimg/python:3.7.12 + environment: + RUN_CUSTOM_TOKENIZERS: yes + TRANSFORMERS_IS_CI: yes + PYTEST_TIMEOUT: 120 + steps: + - checkout + - restore_cache: + keys: + - v0.5-custom_tokenizers-{{ checksum "setup.py" }} + - v0.5-{{ checksum "setup.py" }} + - run: pip install --upgrade pip + - run: pip install .[ja,testing,sentencepiece,jieba,spacy,ftfy,rjieba] + - run: python -m unidic download + - save_cache: + key: v0.5-custom_tokenizers-{{ checksum "setup.py" }} + paths: + - '~/.cache/pip' + - run: python -m pytest --max-worker-restart=0 -s --make-reports=tests_custom_tokenizers ./tests/models/bert_japanese/test_tokenization_bert_japanese.py ./tests/models/openai/test_tokenization_openai.py ./tests/models/clip/test_tokenization_clip.py | tee tests_output.txt + - store_artifacts: + path: ~/transformers/tests_output.txt + - store_artifacts: + path: ~/transformers/reports + run_examples_torch: working_directory: ~/transformers docker: @@ -1026,6 +1052,42 @@ jobs: - store_artifacts: path: ~/transformers/reports + run_tests_layoutlmv2_and_v3_all: + working_directory: ~/transformers + docker: + - image: cimg/python:3.7.12 + environment: + OMP_NUM_THREADS: 1 + TRANSFORMERS_IS_CI: yes + PYTEST_TIMEOUT: 120 + resource_class: xlarge + parallelism: 1 + steps: + - checkout + - restore_cache: + keys: + - v0.5-torch-{{ checksum "setup.py" }} + - v0.5-{{ checksum "setup.py" }} + - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev + - run: pip install --upgrade pip + - run: pip install .[torch,testing,vision] + - run: pip install torchvision + # The commit `36a65a0907d90ed591479b2ebaa8b61cfa0b4ef0` in `detectron2` break things. + # See https://github.com/facebookresearch/detectron2/commit/36a65a0907d90ed591479b2ebaa8b61cfa0b4ef0#comments. + # TODO: Revert this change back once the above issue is fixed. + - run: python -m pip install 'git+https://github.com/facebookresearch/detectron2.git' + - run: sudo apt install tesseract-ocr + - run: pip install pytesseract + - save_cache: + key: v0.5-torch-{{ checksum "setup.py" }} + paths: + - '~/.cache/pip' + - run: python -m pytest -n 1 --max-worker-restart=0 tests/models/*layoutlmv* --dist=loadfile -s --make-reports=tests_layoutlmv2_and_v3 --durations=100 + - store_artifacts: + path: ~/transformers/tests_output.txt + - store_artifacts: + path: ~/transformers/reports + # TPU JOBS run_examples_tpu: docker: @@ -1094,6 +1156,7 @@ workflows: - run_examples_torch_all - run_examples_tensorflow_all - run_examples_flax_all + - run_tests_custom_tokenizers_all - run_tests_torch_and_tf_all - run_tests_torch_and_flax_all - run_tests_torch_all @@ -1103,6 +1166,7 @@ workflows: - run_tests_pipelines_tf_all - run_tests_onnxruntime_all - run_tests_hub_all + - run_tests_layoutlmv2_and_v3_all # tpu_testing_jobs: # triggers: diff --git a/setup.py b/setup.py index 27ab6efd69ef1e..d08f9153826573 100644 --- a/setup.py +++ b/setup.py @@ -236,6 +236,7 @@ def run(self): extras = {} +extras["blob"] = [] extras["ja"] = deps_list("fugashi", "ipadic", "unidic_lite", "unidic") extras["sklearn"] = deps_list("scikit-learn") diff --git a/tests/test_tokenization_common.py b/tests/test_tokenization_common.py index bdb7b6ce673896..ce04fa3f842581 100644 --- a/tests/test_tokenization_common.py +++ b/tests/test_tokenization_common.py @@ -45,6 +45,7 @@ SpecialTokensMixin, Trainer, TrainingArguments, + is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, @@ -2928,8 +2929,10 @@ def test_batch_encode_dynamic_overflowing(self): returned_tensor = "pt" elif is_tf_available(): returned_tensor = "tf" - else: + elif is_flax_available(): returned_tensor = "jax" + else: + return if not tokenizer.pad_token or tokenizer.pad_token_id < 0: return From 16242e1bf07450c5dc39fe64fbc810c877455519 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Thu, 15 Sep 2022 20:10:16 +0200 Subject: [PATCH 311/539] Run `torchdynamo` tests (#19056) * Enable torchdynamo tests * make style Co-authored-by: ydshieh --- .../Dockerfile | 18 ++++++++++++++++++ src/transformers/trainer.py | 3 +-- tests/trainer/test_trainer.py | 12 +++++++++++- 3 files changed, 30 insertions(+), 3 deletions(-) diff --git a/docker/transformers-pytorch-deepspeed-nightly-gpu/Dockerfile b/docker/transformers-pytorch-deepspeed-nightly-gpu/Dockerfile index 1854d9f4b38d48..573e09c22a9c05 100644 --- a/docker/transformers-pytorch-deepspeed-nightly-gpu/Dockerfile +++ b/docker/transformers-pytorch-deepspeed-nightly-gpu/Dockerfile @@ -27,6 +27,24 @@ RUN python3 -m pip uninstall -y deepspeed # RUN git clone https://github.com/microsoft/DeepSpeed && cd DeepSpeed && rm -rf build && \ # DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_AIO=1 DS_BUILD_UTILS=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1 +# For `torchdynamo` tests +# (see https://github.com/huggingface/transformers/pull/17765) +RUN git clone https://github.com/pytorch/functorch +RUN python3 -m pip install --no-cache-dir ./functorch[aot] +RUN cd functorch && python3 setup.py develop + +RUN git clone https://github.com/pytorch/torchdynamo +RUN python3 -m pip install -r ./torchdynamo/requirements.txt +RUN cd torchdynamo && python3 setup.py develop + +# install TensorRT +RUN python3 -m pip install --no-cache-dir -U nvidia-pyindex +RUN python3 -m pip install --no-cache-dir -U nvidia-tensorrt==8.2.4.2 + +# install torch_tensorrt (fx path) +RUN git clone https://github.com/pytorch/TensorRT.git +RUN cd TensorRT/py && python3 setup.py install --fx-only + # When installing in editable mode, `transformers` is not recognized as a package. # this line must be added in order for python to be aware of transformers. RUN cd transformers && python3 setup.py develop diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index 27e44ea0ba0bd4..6cae5a6ea0069f 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -638,14 +638,13 @@ def __init__( raise RuntimeError("Torchdynamo is not installed.") import torchdynamo from torchdynamo.optimizations import backends - from torchdynamo.optimizations.training import aot_autograd_speedup_strategy def get_ctx(): # Normal if args.torchdynamo == "eager": return torchdynamo.optimize("eager") elif args.torchdynamo == "nvfuser": - return torchdynamo.optimize(aot_autograd_speedup_strategy) + return torchdynamo.optimize("aot_nvfuser") # TensorRT if args.torchdynamo in ["fx2trt-fp16", "fx2trt"]: if not is_torch_tensorrt_fx_available(): diff --git a/tests/trainer/test_trainer.py b/tests/trainer/test_trainer.py index f48265ffa58168..a8f4c11dcc4101 100644 --- a/tests/trainer/test_trainer.py +++ b/tests/trainer/test_trainer.py @@ -1799,6 +1799,8 @@ def test_fp16_full_eval(self): @require_torchdynamo @require_torch_tensorrt_fx def test_torchdynamo_full_eval(self): + import torchdynamo + # torchdynamo at the moment doesn't support DP/DDP, therefore require a single gpu n_gpus = get_gpu_count() @@ -1820,11 +1822,13 @@ def test_torchdynamo_full_eval(self): metrics = trainer.evaluate() self.assertAlmostEqual(metrics["eval_loss"], original_eval_loss) del trainer + torchdynamo.reset() # 3. TorchDynamo nvfuser trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, torchdynamo="nvfuser") metrics = trainer.evaluate() self.assertAlmostEqual(metrics["eval_loss"], original_eval_loss) + torchdynamo.reset() # 4. TorchDynamo fx2trt trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, torchdynamo="fx2trt") @@ -1832,6 +1836,7 @@ def test_torchdynamo_full_eval(self): t1 = metrics["eval_loss"] t2 = original_eval_loss self.assertAlmostEqual(metrics["eval_loss"], original_eval_loss) + torchdynamo.reset() # 5. TorchDynamo fx2trt-fp16 trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, torchdynamo="fx2trt-fp16") @@ -1840,11 +1845,14 @@ def test_torchdynamo_full_eval(self): t2 = original_eval_loss # fp16 has accuracy accuracy degradation self.assertLess(np.max(np.abs(t1 - t2)), 1e-3) + torchdynamo.reset() @require_torch_non_multi_gpu @require_torchdynamo def test_torchdynamo_memory(self): # torchdynamo at the moment doesn't support DP/DDP, therefore require a single gpu + import torchdynamo + class CustomTrainer(Trainer): def compute_loss(self, model, inputs, return_outputs=False): x = inputs["x"] @@ -1861,7 +1869,7 @@ def __init__(self): def forward(self, x): for _ in range(20): - x = torch.nn.functional.relu(x) + x = torch.cos(x) return x mod = MyModule() @@ -1881,6 +1889,7 @@ def forward(self, x): orig_loss = trainer.training_step(mod, {"x": a}) orig_peak_mem = torch.cuda.max_memory_allocated() + torchdynamo.reset() del trainer # 2. TorchDynamo nvfuser @@ -1899,6 +1908,7 @@ def forward(self, x): loss = trainer.training_step(mod, {"x": a}) peak_mem = torch.cuda.max_memory_allocated() + torchdynamo.reset() del trainer # Functional check From f3d38632551c7a4bad76399a73541a9e4ee3130c Mon Sep 17 00:00:00 2001 From: Shijie Wu Date: Thu, 15 Sep 2022 14:25:32 -0400 Subject: [PATCH 312/539] fix arg name in BLOOM testing and remove unused arg document (#18843) --- src/transformers/models/bloom/configuration_bloom.py | 8 +------- tests/models/bloom/test_modeling_bloom.py | 8 ++++---- 2 files changed, 5 insertions(+), 11 deletions(-) diff --git a/src/transformers/models/bloom/configuration_bloom.py b/src/transformers/models/bloom/configuration_bloom.py index 4bcc6e2edbcfcc..1103a8148ae1e3 100644 --- a/src/transformers/models/bloom/configuration_bloom.py +++ b/src/transformers/models/bloom/configuration_bloom.py @@ -62,18 +62,12 @@ class BloomConfig(PretrainedConfig): Number of hidden layers in the Transformer encoder. n_head (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. - attn_pdrop (`float`, *optional*, defaults to 0.1): - The dropout ratio for the attention. layer_norm_epsilon (`float`, *optional*, defaults to 1e-5): The epsilon to use in the layer normalization layers. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. apply_residual_connection_post_layernorm (`bool`, *optional*, defaults to `False`): If enabled, use the layer norm of the hidden states as the residual in the transformer blocks - skip_bias_add (`bool`, *optional*, defaults to `True`): - If set to `True`, it will skip bias add for each linear layer in the transformer blocks - skip_bias_add_qkv (`bool`, *optional*, defaults to `False`): - If set to `True`, it will skip bias add for the first linear layer in the transformer blocks hidden_dropout (`float`, *optional*, defaults to 0.1): Dropout rate of the dropout function on the bias dropout. attention_dropout (`float`, *optional*, defaults to 0.1): @@ -124,7 +118,7 @@ def __init__( n_head=8, layer_norm_epsilon=1e-5, initializer_range=0.02, - use_cache=False, + use_cache=True, bos_token_id=1, eos_token_id=2, apply_residual_connection_post_layernorm=False, diff --git a/tests/models/bloom/test_modeling_bloom.py b/tests/models/bloom/test_modeling_bloom.py index 12f66b63a837a3..aa7894d79e26c1 100644 --- a/tests/models/bloom/test_modeling_bloom.py +++ b/tests/models/bloom/test_modeling_bloom.py @@ -57,7 +57,7 @@ def __init__( intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, - attention_probs_dropout_prob=0.1, + attention_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, @@ -81,7 +81,7 @@ def __init__( self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob - self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.attention_dropout_prob = attention_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size @@ -118,8 +118,8 @@ def get_config(self, gradient_checkpointing=False, slow_but_exact=True): hidden_size=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, - resid_pdrop=self.hidden_dropout_prob, - attn_pdrop=self.attention_probs_dropout_prob, + hidden_dropout=self.hidden_dropout_prob, + attention_dropout=self.attention_dropout_prob, n_positions=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, From 0b5c7e4838fddeb4601acbb80cd2c71ddcf76726 Mon Sep 17 00:00:00 2001 From: Colin Dean Date: Thu, 15 Sep 2022 15:53:36 -0400 Subject: [PATCH 313/539] Adds package and requirement spec output to version check exception (#18702) * Adds package and requirement spec output to version check exception It's difficult to understand what package is affected when `got_ver` here comes back None, so output the requirement and the package. The requirement probably contains the package but let's output both for good measure. Non-exhaustive references for this problem aside from my own encounter: * https://stackoverflow.com/questions/70151167/valueerror-got-ver-is-none-when-importing-tensorflow * https://discuss.huggingface.co/t/valueerror-got-ver-is-none/17465 * https://github.com/UKPLab/sentence-transformers/issues/1186 * https://github.com/huggingface/transformers/issues/13356 I speculate that the root of the error comes from a conflict of conda-managed and pip-managed Python packages but I've not yet proven this. * Combines version presence check and streamlines exception message See also: https://github.com/huggingface/transformers/pull/18702#discussion_r953223275 Co-authored-by: Stas Bekman --- src/transformers/utils/versions.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/transformers/utils/versions.py b/src/transformers/utils/versions.py index 14db9b55e59704..b97e75a3368282 100644 --- a/src/transformers/utils/versions.py +++ b/src/transformers/utils/versions.py @@ -41,10 +41,11 @@ def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint): - if got_ver is None: - raise ValueError("got_ver is None") - if want_ver is None: - raise ValueError("want_ver is None") + if got_ver is None or want_ver is None: + raise ValueError( + f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider" + f" reinstalling {pkg}." + ) if not ops[op](version.parse(got_ver), version.parse(want_ver)): raise ImportError( f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" From c8e40d6fa152a710418c9d5cfad7a1b54a98459c Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Fri, 16 Sep 2022 09:07:02 +0200 Subject: [PATCH 314/539] fix `use_cache` (#19060) - set `use_cache` to `True` for consistency with other `transformers` models --- src/transformers/models/trocr/configuration_trocr.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/models/trocr/configuration_trocr.py b/src/transformers/models/trocr/configuration_trocr.py index 0f8729df6917bb..d27b032ebefa2a 100644 --- a/src/transformers/models/trocr/configuration_trocr.py +++ b/src/transformers/models/trocr/configuration_trocr.py @@ -117,7 +117,7 @@ def __init__( classifier_dropout=0.0, init_std=0.02, decoder_layerdrop=0.0, - use_cache=False, + use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, From c603c80f46881ae18b2ca50770ef65fa4033eacd Mon Sep 17 00:00:00 2001 From: Michael Benayoun Date: Fri, 16 Sep 2022 10:57:41 +0200 Subject: [PATCH 315/539] FX support for ConvNext, Wav2Vec2 and ResNet (#19053) * Support for ConvNext * Support for Wav2Vec2 * Support for Resnet * Fix small issue in test_modeling_convnext --- .../models/wav2vec2/modeling_wav2vec2.py | 2 +- .../modeling_wav2vec2_conformer.py | 2 +- src/transformers/utils/fx.py | 5 +- .../models/convnext/test_modeling_convnext.py | 1 + tests/models/resnet/test_modeling_resnet.py | 1 + .../models/wav2vec2/test_modeling_wav2vec2.py | 109 ++++++++++++++++++ 6 files changed, 117 insertions(+), 3 deletions(-) diff --git a/src/transformers/models/wav2vec2/modeling_wav2vec2.py b/src/transformers/models/wav2vec2/modeling_wav2vec2.py index 9f678080039618..e1676399c14d08 100755 --- a/src/transformers/models/wav2vec2/modeling_wav2vec2.py +++ b/src/transformers/models/wav2vec2/modeling_wav2vec2.py @@ -960,7 +960,7 @@ def forward(self, hidden_states, mask_time_indices=None): # take argmax in non-differentiable way # comptute hard codevector distribution (one hot) codevector_idx = hidden_states.argmax(dim=-1) - codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_( + codevector_probs = hidden_states.new_zeros(hidden_states.shape).scatter_( -1, codevector_idx.view(-1, 1), 1.0 ) codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1) diff --git a/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py b/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py index 5bee0d040c8ba4..8723c6338d2d83 100644 --- a/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py +++ b/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py @@ -1023,7 +1023,7 @@ def forward(self, hidden_states, mask_time_indices=None): # take argmax in non-differentiable way # comptute hard codevector distribution (one hot) codevector_idx = hidden_states.argmax(dim=-1) - codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_( + codevector_probs = hidden_states.new_zeros(hidden_states.shape).scatter_( -1, codevector_idx.view(-1, 1), 1.0 ) codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1) diff --git a/src/transformers/utils/fx.py b/src/transformers/utils/fx.py index c08f6766c9dfc4..d3255baf847061 100644 --- a/src/transformers/utils/fx.py +++ b/src/transformers/utils/fx.py @@ -104,6 +104,7 @@ def _generate_supported_model_class_names( "blenderbot-small", "bloom", "clip", + "convnext", "deberta", "deberta-v2", "distilbert", @@ -125,6 +126,7 @@ def _generate_supported_model_class_names( "opt", "pegasus", "plbart", + "resnet", "roberta", "speech_to_text", "speech_to_text_2", @@ -133,6 +135,7 @@ def _generate_supported_model_class_names( "trocr", "vit", "xglm", + "wav2vec2", # "xlnet", ] @@ -743,7 +746,7 @@ def _generate_dummy_input( elif hasattr(model.config, "encoder"): image_size = model.config.encoder.image_size else: - raise AttributeError('Could not find the "image_size" field in the model config') + image_size = (_generate_random_int(), _generate_random_int()) # If no num_channels is in the config, use some arbitrary value. num_channels = getattr(model.config, "num_channels", 3) diff --git a/tests/models/convnext/test_modeling_convnext.py b/tests/models/convnext/test_modeling_convnext.py index 46ef3ce71709cc..1225175a1b0641 100644 --- a/tests/models/convnext/test_modeling_convnext.py +++ b/tests/models/convnext/test_modeling_convnext.py @@ -137,6 +137,7 @@ class ConvNextModelTest(ModelTesterMixin, unittest.TestCase): else () ) + fx_compatible = True test_pruning = False test_resize_embeddings = False test_head_masking = False diff --git a/tests/models/resnet/test_modeling_resnet.py b/tests/models/resnet/test_modeling_resnet.py index 83f08b68afb8be..557883e0b1ba9f 100644 --- a/tests/models/resnet/test_modeling_resnet.py +++ b/tests/models/resnet/test_modeling_resnet.py @@ -126,6 +126,7 @@ class ResNetModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (ResNetModel, ResNetForImageClassification) if is_torch_available() else () + fx_compatible = True test_pruning = False test_resize_embeddings = False test_head_masking = False diff --git a/tests/models/wav2vec2/test_modeling_wav2vec2.py b/tests/models/wav2vec2/test_modeling_wav2vec2.py index 21f77b19a553ca..040731472fe5bc 100644 --- a/tests/models/wav2vec2/test_modeling_wav2vec2.py +++ b/tests/models/wav2vec2/test_modeling_wav2vec2.py @@ -15,6 +15,9 @@ """ Testing suite for the PyTorch Wav2Vec2 model. """ import math +import os +import pickle +import tempfile import unittest import numpy as np @@ -32,6 +35,7 @@ slow, torch_device, ) +from transformers.utils import is_torch_fx_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( @@ -72,6 +76,10 @@ from transformers import Wav2Vec2ProcessorWithLM +if is_torch_fx_available(): + from transformers.utils.fx import symbolic_trace + + class Wav2Vec2ModelTester: def __init__( self, @@ -411,6 +419,7 @@ class Wav2Vec2ModelTest(ModelTesterMixin, unittest.TestCase): if is_torch_available() else () ) + fx_compatible = True test_pruning = False test_headmasking = False @@ -633,6 +642,106 @@ def test_model_from_pretrained(self): model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h") self.assertIsNotNone(model) + # Wav2Vec2 cannot be torchscripted because of group norm. + def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False): + if not is_torch_fx_available() or not self.fx_compatible: + return + + configs_no_init = _config_zero_init(config) # To be sure we have no Nan + configs_no_init.return_dict = False + + for model_class in self.all_model_classes: + model = model_class(config=configs_no_init) + model.to(torch_device) + model.eval() + inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss) + + try: + input_names = [ + "attention_mask", + "bbox", + "input_features", + "input_ids", + "input_values", + "pixel_values", + "token_type_ids", + "visual_feats", + "visual_pos", + ] + + labels = inputs.get("labels", None) + start_positions = inputs.get("start_positions", None) + end_positions = inputs.get("end_positions", None) + if labels is not None: + input_names.append("labels") + if start_positions is not None: + input_names.append("start_positions") + if end_positions is not None: + input_names.append("end_positions") + + filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} + input_names = list(filtered_inputs.keys()) + + model_output = model(**filtered_inputs) + + if ( + isinstance(model, Wav2Vec2ForSequenceClassification) + and not hasattr(model.config, "problem_type") + or model.config.problem_type is None + ): + model.config.problem_type = "single_label_classification" + + traced_model = symbolic_trace(model, input_names) + traced_output = traced_model(**filtered_inputs) + + except Exception as e: + self.fail(f"Couldn't trace module: {e}") + + def flatten_output(output): + flatten = [] + for x in output: + if isinstance(x, (tuple, list)): + flatten += flatten_output(x) + elif not isinstance(x, torch.Tensor): + continue + else: + flatten.append(x) + return flatten + + model_output = flatten_output(model_output) + traced_output = flatten_output(traced_output) + num_outputs = len(model_output) + + for i in range(num_outputs): + self.assertTrue( + torch.allclose(model_output[i], traced_output[i]), + f"traced {i}th output doesn't match model {i}th output for {model_class}", + ) + + # Test that the model can be serialized and restored properly + with tempfile.TemporaryDirectory() as tmp_dir_name: + pkl_file_name = os.path.join(tmp_dir_name, "model.pkl") + try: + with open(pkl_file_name, "wb") as f: + pickle.dump(traced_model, f) + with open(pkl_file_name, "rb") as f: + loaded = pickle.load(f) + except Exception as e: + self.fail(f"Couldn't serialize / deserialize the traced model: {e}") + + loaded_output = loaded(**filtered_inputs) + loaded_output = flatten_output(loaded_output) + + for i in range(num_outputs): + self.assertTrue( + torch.allclose(model_output[i], loaded_output[i]), + f"serialized model {i}th output doesn't match model {i}th output for {model_class}", + ) + + # Avoid memory leak. Without this, each call increase RAM usage by ~20MB. + # (Even with this call, there are still memory leak by ~0.04MB) + self.clear_torch_jit_class_registry() + @require_torch class Wav2Vec2RobustModelTest(ModelTesterMixin, unittest.TestCase): From 532ca05079e6e8be04407df059c9159c15725522 Mon Sep 17 00:00:00 2001 From: Tom Aarsen <37621491+tomaarsen@users.noreply.github.com> Date: Fri, 16 Sep 2022 13:31:39 +0200 Subject: [PATCH 316/539] [doc] Fix link in PreTrainedModel documentation (#19065) --- src/transformers/modeling_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index 930e97fc324bfe..af32c3f98f6fe3 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -1472,7 +1472,7 @@ def save_pretrained( ): """ Save a model and its configuration file to a directory, so that it can be re-loaded using the - `[`~PreTrainedModel.from_pretrained`]` class method. + [`~PreTrainedModel.from_pretrained`] class method. Arguments: save_directory (`str` or `os.PathLike`): From d63bdf78d44576ce81488898f86b40e611a4320b Mon Sep 17 00:00:00 2001 From: Jim Briggs Date: Fri, 16 Sep 2022 13:42:57 +0100 Subject: [PATCH 317/539] Add FP32 cast in ConvNext LayerNorm to prevent rounding errors with FP16 input (#18746) * Adding cast to fp32 in convnext layernorm to prevent rounding errors in the case of fp16 input * Trigger CI --- src/transformers/models/convnext/modeling_convnext.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/transformers/models/convnext/modeling_convnext.py b/src/transformers/models/convnext/modeling_convnext.py index e9274f1e54d111..44d34c833b4336 100755 --- a/src/transformers/models/convnext/modeling_convnext.py +++ b/src/transformers/models/convnext/modeling_convnext.py @@ -109,9 +109,12 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: if self.data_format == "channels_last": x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) elif self.data_format == "channels_first": + input_dtype = x.dtype + x = x.float() u = x.mean(1, keepdim=True) s = (x - u).pow(2).mean(1, keepdim=True) x = (x - u) / torch.sqrt(s + self.eps) + x = x.to(dtype=input_dtype) x = self.weight[:, None, None] * x + self.bias[:, None, None] return x From 820cb97a3f8929d29600efb0ca4469cd11ad48f2 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Fri, 16 Sep 2022 09:19:51 -0400 Subject: [PATCH 318/539] Organize test jobs (#19058) * Tests conditional run * Syntax * Deps * Try early exit * Another way * Test with no tests to run * Test all * Typo * Try this way * With tests to run * Mostly finished * Typo * With a modification in one file only * No change, no tests * Final cleanup * Address review comments --- .circleci/config.yml | 771 ++++++++++++------------------------------- setup.py | 1 - 2 files changed, 220 insertions(+), 552 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 7ff545b2a39a0c..e911a2f4d0e486 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -62,48 +62,50 @@ references: jobs: - run_tests_torch_and_tf: + # Fetch the tests to run + fetch_tests: working_directory: ~/transformers docker: - image: cimg/python:3.7.12 - environment: - OMP_NUM_THREADS: 1 - RUN_PT_TF_CROSS_TESTS: yes - TRANSFORMERS_IS_CI: yes - PYTEST_TIMEOUT: 120 - resource_class: xlarge parallelism: 1 steps: - checkout - - restore_cache: - keys: - - v0.5-torch_and_tf-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} - - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng git-lfs - - run: git lfs install - run: pip install --upgrade pip - - run: pip install .[sklearn,tf-cpu,torch,testing,sentencepiece,torch-speech,vision] - - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.12.0+cpu.html - - run: pip install tensorflow_probability - - run: pip install https://github.com/kpu/kenlm/archive/master.zip - - run: pip install git+https://github.com/huggingface/accelerate - - save_cache: - key: v0.5-{{ checksum "setup.py" }} - paths: - - '~/.cache/pip' - - run: python utils/tests_fetcher.py | tee test_preparation.txt + - run: pip install GitPython + - run: pip install . + - run: mkdir -p test_preparation + - run: python utils/tests_fetcher.py | tee tests_fetched_summary.txt - store_artifacts: - path: ~/transformers/test_preparation.txt + path: ~/transformers/tests_fetched_summary.txt - run: | - if [ -f test_list.txt ]; then - python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_tf $(cat test_list.txt) -m is_pt_tf_cross_test --durations=0 | tee tests_output.txt - fi - - store_artifacts: - path: ~/transformers/tests_output.txt - - store_artifacts: - path: ~/transformers/reports + if [ -f test_list.txt ]; then + mv test_list.txt test_preparation/test_list.txt + else + touch test_preparation/test_list.txt + fi + + - persist_to_workspace: + root: test_preparation/ + paths: + test_list.txt - run_tests_torch_and_tf_all: + # To run all tests for the nightly build + fetch_all_tests: + working_directory: ~/transformers + docker: + - image: cimg/python:3.7.12 + parallelism: 1 + steps: + - run: | + mkdir test_preparation + echo "tests" > test_preparation/test_list.txt + + - persist_to_workspace: + root: test_preparation/ + paths: + test_list.txt + + run_tests_torch_and_tf: working_directory: ~/transformers docker: - image: cimg/python:3.7.12 @@ -116,6 +118,13 @@ jobs: parallelism: 1 steps: - checkout + - attach_workspace: + at: ~/transformers/test_preparation + - run: | + if [ ! -s test_preparation/test_list.txt ]; then + echo "No tests to run, exiting early!" + circleci-agent step halt + fi - restore_cache: keys: - v0.5-torch_and_tf-{{ checksum "setup.py" }} @@ -132,8 +141,7 @@ jobs: key: v0.5-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - - run: | - python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_tf tests -m is_pt_tf_cross_test --durations=0 | tee tests_output.txt + - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_tf $(cat test_preparation/test_list.txt) -m is_pt_tf_cross_test --durations=0 | tee tests_output.txt - store_artifacts: path: ~/transformers/tests_output.txt - store_artifacts: @@ -152,45 +160,13 @@ jobs: parallelism: 1 steps: - checkout - - restore_cache: - keys: - - v0.5-torch_and_flax-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} - - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng - - run: pip install --upgrade pip - - run: pip install .[sklearn,flax,torch,testing,sentencepiece,torch-speech,vision] - - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.12.0+cpu.html - - run: pip install https://github.com/kpu/kenlm/archive/master.zip - - run: pip install git+https://github.com/huggingface/accelerate - - save_cache: - key: v0.5-{{ checksum "setup.py" }} - paths: - - '~/.cache/pip' - - run: python utils/tests_fetcher.py | tee test_preparation.txt - - store_artifacts: - path: ~/transformers/test_preparation.txt + - attach_workspace: + at: ~/transformers/test_preparation - run: | - if [ -f test_list.txt ]; then - python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_flax $(cat test_list.txt) -m is_pt_flax_cross_test --durations=0 | tee tests_output.txt - fi - - store_artifacts: - path: ~/transformers/tests_output.txt - - store_artifacts: - path: ~/transformers/reports - - run_tests_torch_and_flax_all: - working_directory: ~/transformers - docker: - - image: cimg/python:3.7.12 - environment: - OMP_NUM_THREADS: 1 - RUN_PT_FLAX_CROSS_TESTS: yes - TRANSFORMERS_IS_CI: yes - PYTEST_TIMEOUT: 120 - resource_class: xlarge - parallelism: 1 - steps: - - checkout + if [ ! -s test_preparation/test_list.txt ]; then + echo "No tests to run, exiting early!" + circleci-agent step halt + fi - restore_cache: keys: - v0.5-torch_and_flax-{{ checksum "setup.py" }} @@ -205,8 +181,7 @@ jobs: key: v0.5-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - - run: | - python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_flax tests -m is_pt_flax_cross_test --durations=0 | tee tests_output.txt + - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_flax $(cat test_preparation/test_list.txt) -m is_pt_flax_cross_test --durations=0 | tee tests_output.txt - store_artifacts: path: ~/transformers/tests_output.txt - store_artifacts: @@ -224,49 +199,18 @@ jobs: parallelism: 1 steps: - checkout - - restore_cache: - keys: - - v0.5-torch-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} - - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng time - - run: pip install --upgrade pip - - run: pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm] - - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.12.0+cpu.html - - run: pip install https://github.com/kpu/kenlm/archive/master.zip - - run: pip install git+https://github.com/huggingface/accelerate - - save_cache: - key: v0.5-torch-{{ checksum "setup.py" }} - paths: - - '~/.cache/pip' - - run: python utils/tests_fetcher.py | tee test_preparation.txt - - store_artifacts: - path: ~/transformers/test_preparation.txt + - attach_workspace: + at: ~/transformers/test_preparation - run: | - if [ -f test_list.txt ]; then - python -m pytest -n 3 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_torch $(cat test_list.txt) | tee tests_output.txt - fi - - store_artifacts: - path: ~/transformers/tests_output.txt - - store_artifacts: - path: ~/transformers/reports - - run_tests_torch_all: - working_directory: ~/transformers - docker: - - image: cimg/python:3.7.12 - environment: - OMP_NUM_THREADS: 1 - TRANSFORMERS_IS_CI: yes - PYTEST_TIMEOUT: 120 - resource_class: xlarge - parallelism: 1 - steps: - - checkout + if [ ! -s test_preparation/test_list.txt ]; then + echo "No tests to run, exiting early!" + circleci-agent step halt + fi - restore_cache: keys: - v0.5-torch-{{ checksum "setup.py" }} - v0.5-{{ checksum "setup.py" }} - - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng + - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng time - run: pip install --upgrade pip - run: pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm] - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.12.0+cpu.html @@ -276,8 +220,7 @@ jobs: key: v0.5-torch-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - - run: | - python -m pytest -n 3 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_torch tests | tee tests_output.txt + - run: python -m pytest -n 3 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_torch $(cat test_preparation/test_list.txt) | tee tests_output.txt - store_artifacts: path: ~/transformers/tests_output.txt - store_artifacts: @@ -295,43 +238,13 @@ jobs: parallelism: 1 steps: - checkout - - restore_cache: - keys: - - v0.5-tf-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} - - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng - - run: pip install --upgrade pip - - run: pip install .[sklearn,tf-cpu,testing,sentencepiece,tf-speech,vision] - - run: pip install tensorflow_probability - - run: pip install https://github.com/kpu/kenlm/archive/master.zip - - save_cache: - key: v0.5-tf-{{ checksum "setup.py" }} - paths: - - '~/.cache/pip' - - run: python utils/tests_fetcher.py | tee test_preparation.txt - - store_artifacts: - path: ~/transformers/test_preparation.txt + - attach_workspace: + at: ~/transformers/test_preparation - run: | - if [ -f test_list.txt ]; then - python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_tf $(cat test_list.txt) | tee tests_output.txt - fi - - store_artifacts: - path: ~/transformers/tests_output.txt - - store_artifacts: - path: ~/transformers/reports - - run_tests_tf_all: - working_directory: ~/transformers - docker: - - image: cimg/python:3.7.12 - environment: - OMP_NUM_THREADS: 1 - TRANSFORMERS_IS_CI: yes - PYTEST_TIMEOUT: 120 - resource_class: xlarge - parallelism: 1 - steps: - - checkout + if [ ! -s test_preparation/test_list.txt ]; then + echo "No tests to run, exiting early!" + circleci-agent step halt + fi - restore_cache: keys: - v0.5-tf-{{ checksum "setup.py" }} @@ -345,8 +258,7 @@ jobs: key: v0.5-tf-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - - run: | - python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_tf tests | tee tests_output.txt + - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_tf $(cat test_preparation/test_list.txt) | tee tests_output.txt - store_artifacts: path: ~/transformers/tests_output.txt - store_artifacts: @@ -364,56 +276,26 @@ jobs: parallelism: 1 steps: - checkout - - restore_cache: - keys: - - v0.5-flax-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} - - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng - - run: pip install --upgrade pip - - run: pip install .[flax,testing,sentencepiece,flax-speech,vision] - - run: pip install https://github.com/kpu/kenlm/archive/master.zip - - save_cache: - key: v0.5-flax-{{ checksum "setup.py" }} - paths: - - '~/.cache/pip' - - run: python utils/tests_fetcher.py | tee test_preparation.txt - - store_artifacts: - path: ~/transformers/test_preparation.txt + - attach_workspace: + at: ~/transformers/test_preparation - run: | - if [ -f test_list.txt ]; then - python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_flax $(cat test_list.txt) | tee tests_output.txt - fi - - store_artifacts: - path: ~/transformers/tests_output.txt - - store_artifacts: - path: ~/transformers/reports - - run_tests_flax_all: - working_directory: ~/transformers - docker: - - image: cimg/python:3.7.12 - environment: - OMP_NUM_THREADS: 1 - TRANSFORMERS_IS_CI: yes - PYTEST_TIMEOUT: 120 - resource_class: xlarge - parallelism: 1 - steps: - - checkout + if [ ! -s test_preparation/test_list.txt ]; then + echo "No tests to run, exiting early!" + circleci-agent step halt + fi - restore_cache: keys: - v0.5-flax-{{ checksum "setup.py" }} - v0.5-{{ checksum "setup.py" }} - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng - run: pip install --upgrade pip - - run: pip install .[flax,testing,sentencepiece,vision,flax-speech] + - run: pip install .[flax,testing,sentencepiece,flax-speech,vision] - run: pip install https://github.com/kpu/kenlm/archive/master.zip - save_cache: key: v0.5-flax-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - - run: | - python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_flax tests | tee tests_output.txt + - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_flax $(cat test_preparation/test_list.txt) | tee tests_output.txt - store_artifacts: path: ~/transformers/tests_output.txt - store_artifacts: @@ -432,44 +314,13 @@ jobs: parallelism: 1 steps: - checkout - - restore_cache: - keys: - - v0.5-torch-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} - - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng - - run: pip install --upgrade pip - - run: pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm] - - run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.12.0+cpu.html - - run: pip install https://github.com/kpu/kenlm/archive/master.zip - - save_cache: - key: v0.5-torch-{{ checksum "setup.py" }} - paths: - - '~/.cache/pip' - - run: python utils/tests_fetcher.py | tee test_preparation.txt - - store_artifacts: - path: ~/transformers/test_preparation.txt + - attach_workspace: + at: ~/transformers/test_preparation - run: | - if [ -f test_list.txt ]; then - python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_torch -m is_pipeline_test $(cat test_list.txt) | tee tests_output.txt - fi - - store_artifacts: - path: ~/transformers/tests_output.txt - - store_artifacts: - path: ~/transformers/reports - - run_tests_pipelines_torch_all: - working_directory: ~/transformers - docker: - - image: cimg/python:3.7.12 - environment: - OMP_NUM_THREADS: 1 - RUN_PIPELINE_TESTS: yes - TRANSFORMERS_IS_CI: yes - PYTEST_TIMEOUT: 120 - resource_class: xlarge - parallelism: 1 - steps: - - checkout + if [ ! -s test_preparation/test_list.txt ]; then + echo "No tests to run, exiting early!" + circleci-agent step halt + fi - restore_cache: keys: - v0.5-torch-{{ checksum "setup.py" }} @@ -483,8 +334,7 @@ jobs: key: v0.5-torch-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - - run: | - python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_torch -m is_pipeline_test tests | tee tests_output.txt + - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_torch -m is_pipeline_test $(cat test_preparation/test_list.txt) | tee tests_output.txt - store_artifacts: path: ~/transformers/tests_output.txt - store_artifacts: @@ -503,42 +353,13 @@ jobs: parallelism: 1 steps: - checkout - - restore_cache: - keys: - - v0.5-tf-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} - - run: pip install --upgrade pip - - run: pip install .[sklearn,tf-cpu,testing,sentencepiece] - - run: pip install tensorflow_probability - - save_cache: - key: v0.5-tf-{{ checksum "setup.py" }} - paths: - - '~/.cache/pip' - - run: python utils/tests_fetcher.py | tee test_preparation.txt - - store_artifacts: - path: ~/transformers/test_preparation.txt + - attach_workspace: + at: ~/transformers/test_preparation - run: | - if [ -f test_list.txt ]; then - python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_tf $(cat test_list.txt) -m is_pipeline_test | tee tests_output.txt - fi - - store_artifacts: - path: ~/transformers/tests_output.txt - - store_artifacts: - path: ~/transformers/reports - - run_tests_pipelines_tf_all: - working_directory: ~/transformers - docker: - - image: cimg/python:3.7.12 - environment: - OMP_NUM_THREADS: 1 - RUN_PIPELINE_TESTS: yes - TRANSFORMERS_IS_CI: yes - PYTEST_TIMEOUT: 120 - resource_class: xlarge - parallelism: 1 - steps: - - checkout + if [ ! -s test_preparation/test_list.txt ]; then + echo "No tests to run, exiting early!" + circleci-agent step halt + fi - restore_cache: keys: - v0.5-tf-{{ checksum "setup.py" }} @@ -550,8 +371,7 @@ jobs: key: v0.5-tf-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - - run: | - python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_tf tests -m is_pipeline_test | tee tests_output.txt + - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_tf $(cat test_preparation/test_list.txt) -m is_pipeline_test | tee tests_output.txt - store_artifacts: path: ~/transformers/tests_output.txt - store_artifacts: @@ -567,39 +387,13 @@ jobs: PYTEST_TIMEOUT: 120 steps: - checkout - - restore_cache: - keys: - - v0.5-custom_tokenizers-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} - - run: pip install --upgrade pip - - run: pip install .[ja,testing,sentencepiece,jieba,spacy,ftfy,rjieba] - - run: python -m unidic download - - save_cache: - key: v0.5-custom_tokenizers-{{ checksum "setup.py" }} - paths: - - '~/.cache/pip' - - run: python utils/tests_fetcher.py | tee test_preparation.txt - - store_artifacts: - path: ~/transformers/test_preparation.txt + - attach_workspace: + at: ~/transformers/test_preparation - run: | - if [ -f test_list.txt ]; then - python -m pytest --max-worker-restart=0 -s --make-reports=tests_custom_tokenizers ./tests/models/bert_japanese/test_tokenization_bert_japanese.py ./tests/models/openai/test_tokenization_openai.py ./tests/models/clip/test_tokenization_clip.py | tee tests_output.txt - fi - - store_artifacts: - path: ~/transformers/tests_output.txt - - store_artifacts: - path: ~/transformers/reports - - run_tests_custom_tokenizers_all: - working_directory: ~/transformers - docker: - - image: cimg/python:3.7.12 - environment: - RUN_CUSTOM_TOKENIZERS: yes - TRANSFORMERS_IS_CI: yes - PYTEST_TIMEOUT: 120 - steps: - - checkout + if [ ! -s test_preparation/test_list.txt ]; then + echo "No tests to run, exiting early!" + circleci-agent step halt + fi - restore_cache: keys: - v0.5-custom_tokenizers-{{ checksum "setup.py" }} @@ -629,42 +423,13 @@ jobs: parallelism: 1 steps: - checkout - - restore_cache: - keys: - - v0.5-torch_examples-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} - - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng - - run: pip install --upgrade pip - - run: pip install .[sklearn,torch,sentencepiece,testing,torch-speech] - - run: pip install -r examples/pytorch/_tests_requirements.txt - - save_cache: - key: v0.5-torch_examples-{{ checksum "setup.py" }} - paths: - - '~/.cache/pip' - - run: python utils/tests_fetcher.py --filters examples tests | tee test_preparation.txt - - store_artifacts: - path: ~/transformers/test_preparation.txt + - attach_workspace: + at: ~/transformers/test_preparation - run: | - if [ -f test_list.txt ]; then - python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -s --make-reports=examples_torch ./examples/pytorch/ | tee tests_output.txt - fi - - store_artifacts: - path: ~/transformers/examples_output.txt - - store_artifacts: - path: ~/transformers/reports - - run_examples_torch_all: - working_directory: ~/transformers - docker: - - image: cimg/python:3.7.12 - environment: - OMP_NUM_THREADS: 1 - TRANSFORMERS_IS_CI: yes - PYTEST_TIMEOUT: 120 - resource_class: xlarge - parallelism: 1 - steps: - - checkout + if [ ! -s test_preparation/test_list.txt ]; then + echo "No tests to run, exiting early!" + circleci-agent step halt + fi - restore_cache: keys: - v0.5-torch_examples-{{ checksum "setup.py" }} @@ -677,8 +442,7 @@ jobs: key: v0.5-torch_examples-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - - run: | - TRANSFORMERS_IS_CI=1 python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -s --make-reports=examples_torch ./examples/pytorch/ | tee examples_output.txt + - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -s --make-reports=examples_torch ./examples/pytorch/ | tee tests_output.txt - store_artifacts: path: ~/transformers/examples_output.txt - store_artifacts: @@ -696,41 +460,13 @@ jobs: parallelism: 1 steps: - checkout - - restore_cache: - keys: - - v0.5-tensorflow_examples-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} - - run: pip install --upgrade pip - - run: pip install .[sklearn,tensorflow,sentencepiece,testing] - - run: pip install -r examples/tensorflow/_tests_requirements.txt - - save_cache: - key: v0.5-tensorflow_examples-{{ checksum "setup.py" }} - paths: - - '~/.cache/pip' - - run: python utils/tests_fetcher.py --filters examples tests | tee test_preparation.txt - - store_artifacts: - path: ~/transformers/test_preparation.txt + - attach_workspace: + at: ~/transformers/test_preparation - run: | - if [ -f test_list.txt ]; then - python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -s --make-reports=examples_tensorflow ./examples/tensorflow/ | tee tests_output.txt - fi - - store_artifacts: - path: ~/transformers/tensorflow_examples_output.txt - - store_artifacts: - path: ~/transformers/reports - - run_examples_tensorflow_all: - working_directory: ~/transformers - docker: - - image: cimg/python:3.7.12 - environment: - OMP_NUM_THREADS: 1 - TRANSFORMERS_IS_CI: yes - PYTEST_TIMEOUT: 120 - resource_class: xlarge - parallelism: 1 - steps: - - checkout + if [ ! -s test_preparation/test_list.txt ]; then + echo "No tests to run, exiting early!" + circleci-agent step halt + fi - restore_cache: keys: - v0.5-tensorflow_examples-{{ checksum "setup.py" }} @@ -742,8 +478,7 @@ jobs: key: v0.5-tensorflow_examples-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - - run: | - TRANSFORMERS_IS_CI=1 python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -s --make-reports=examples_tensorflow ./examples/tensorflow/ | tee examples_output.txt + - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -s --make-reports=examples_tensorflow ./examples/tensorflow/ | tee tests_output.txt - store_artifacts: path: ~/transformers/tensorflow_examples_output.txt - store_artifacts: @@ -761,41 +496,13 @@ jobs: parallelism: 1 steps: - checkout - - restore_cache: - keys: - - v0.5-flax_examples-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} - - run: pip install --upgrade pip - - run: pip install .[flax,testing,sentencepiece] - - run: pip install -r examples/flax/_tests_requirements.txt - - save_cache: - key: v0.5-flax_examples-{{ checksum "setup.py" }} - paths: - - '~/.cache/pip' - - run: python utils/tests_fetcher.py --filters examples tests | tee test_preparation.txt - - store_artifacts: - path: ~/transformers/test_preparation.txt + - attach_workspace: + at: ~/transformers/test_preparation - run: | - if [ -f test_list.txt ]; then - python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -s --make-reports=examples_flax ./examples/flax/ | tee tests_output.txt - fi - - store_artifacts: - path: ~/transformers/flax_examples_output.txt - - store_artifacts: - path: ~/transformers/reports - - run_examples_flax_all: - working_directory: ~/transformers - docker: - - image: cimg/python:3.7.12 - environment: - OMP_NUM_THREADS: 1 - TRANSFORMERS_IS_CI: yes - PYTEST_TIMEOUT: 120 - resource_class: xlarge - parallelism: 1 - steps: - - checkout + if [ ! -s test_preparation/test_list.txt ]; then + echo "No tests to run, exiting early!" + circleci-agent step halt + fi - restore_cache: keys: - v0.5-flax_examples-{{ checksum "setup.py" }} @@ -807,8 +514,7 @@ jobs: key: v0.5-flax_examples-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - - run: | - TRANSFORMERS_IS_CI=1 python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -s --make-reports=examples_flax ./examples/flax/ | tee examples_output.txt + - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -s --make-reports=examples_flax ./examples/flax/ | tee tests_output.txt - store_artifacts: path: ~/transformers/flax_examples_output.txt - store_artifacts: @@ -827,45 +533,13 @@ jobs: parallelism: 1 steps: - checkout - - restore_cache: - keys: - - v0.5-hub-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} - - run: sudo apt-get -y update && sudo apt-get install git-lfs + - attach_workspace: + at: ~/transformers/test_preparation - run: | - git config --global user.email "ci@dummy.com" - git config --global user.name "ci" - - run: pip install --upgrade pip - - run: pip install .[torch,sentencepiece,testing] - - save_cache: - key: v0.5-hub-{{ checksum "setup.py" }} - paths: - - '~/.cache/pip' - - run: python utils/tests_fetcher.py | tee test_preparation.txt - - store_artifacts: - path: ~/transformers/test_preparation.txt - - run: | - if [ -f test_list.txt ]; then - python -m pytest --max-worker-restart=0 -sv --make-reports=tests_hub $(cat test_list.txt) -m is_staging_test | tee tests_output.txt - fi - - store_artifacts: - path: ~/transformers/tests_output.txt - - store_artifacts: - path: ~/transformers/reports - - run_tests_hub_all: - working_directory: ~/transformers - docker: - - image: cimg/python:3.7.12 - environment: - HUGGINGFACE_CO_STAGING: yes - RUN_GIT_LFS_TESTS: yes - TRANSFORMERS_IS_CI: yes - PYTEST_TIMEOUT: 120 - resource_class: xlarge - parallelism: 1 - steps: - - checkout + if [ ! -s test_preparation/test_list.txt ]; then + echo "No tests to run, exiting early!" + circleci-agent step halt + fi - restore_cache: keys: - v0.5-hub-{{ checksum "setup.py" }} @@ -880,8 +554,7 @@ jobs: key: v0.5-hub-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - - run: | - python -m pytest --max-worker-restart=0 -sv --make-reports=tests_hub tests -m is_staging_test | tee tests_output.txt + - run: python -m pytest --max-worker-restart=0 -sv --make-reports=tests_hub $(cat test_preparation/test_list.txt) -m is_staging_test | tee tests_output.txt - store_artifacts: path: ~/transformers/tests_output.txt - store_artifacts: @@ -899,6 +572,13 @@ jobs: parallelism: 1 steps: - checkout + - attach_workspace: + at: ~/transformers/test_preparation + - run: | + if [ ! -s test_preparation/test_list.txt ]; then + echo "No tests to run, exiting early!" + circleci-agent step halt + fi - restore_cache: keys: - v0.5-torch-{{ checksum "setup.py" }} @@ -909,42 +589,8 @@ jobs: key: v0.5-onnx-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - - run: python utils/tests_fetcher.py | tee test_preparation.txt - - store_artifacts: - path: ~/transformers/test_preparation.txt - - run: | - if [ -f test_list.txt ]; then - python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_onnx $(cat test_list.txt) -k onnx | tee tests_output.txt - fi - - store_artifacts: - path: ~/transformers/tests_output.txt - - store_artifacts: - path: ~/transformers/reports + - run: python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_onnx $(cat test_preparation/test_list.txt) -k onnx | tee tests_output.txt - run_tests_onnxruntime_all: - working_directory: ~/transformers - docker: - - image: cimg/python:3.7.12 - environment: - OMP_NUM_THREADS: 1 - TRANSFORMERS_IS_CI: yes - PYTEST_TIMEOUT: 120 - resource_class: xlarge - parallelism: 1 - steps: - - checkout - - restore_cache: - keys: - - v0.5-torch-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} - - run: pip install --upgrade pip - - run: pip install .[torch,tf,testing,sentencepiece,onnxruntime,vision] - - save_cache: - key: v0.5-onnx-{{ checksum "setup.py" }} - paths: - - '~/.cache/pip' - - run: | - python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_onnx tests -k onnx | tee tests_output.txt - store_artifacts: path: ~/transformers/tests_output.txt - store_artifacts: @@ -1022,48 +668,13 @@ jobs: parallelism: 1 steps: - checkout - - restore_cache: - keys: - - v0.5-torch-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} - - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev - - run: pip install --upgrade pip - - run: pip install .[torch,testing,vision] - - run: pip install torchvision - # The commit `36a65a0907d90ed591479b2ebaa8b61cfa0b4ef0` in `detectron2` break things. - # See https://github.com/facebookresearch/detectron2/commit/36a65a0907d90ed591479b2ebaa8b61cfa0b4ef0#comments. - # TODO: Revert this change back once the above issue is fixed. - - run: python -m pip install 'git+https://github.com/facebookresearch/detectron2.git' - - run: sudo apt install tesseract-ocr - - run: pip install pytesseract - - save_cache: - key: v0.5-torch-{{ checksum "setup.py" }} - paths: - - '~/.cache/pip' - - run: python utils/tests_fetcher.py | tee test_preparation.txt - - store_artifacts: - path: ~/transformers/test_preparation.txt + - attach_workspace: + at: ~/transformers/test_preparation - run: | - if [ -f test_list.txt ]; then - python -m pytest -n 1 --max-worker-restart=0 tests/models/*layoutlmv* --dist=loadfile -s --make-reports=tests_layoutlmv2_and_v3 --durations=100 - fi - - store_artifacts: - path: ~/transformers/tests_output.txt - - store_artifacts: - path: ~/transformers/reports - - run_tests_layoutlmv2_and_v3_all: - working_directory: ~/transformers - docker: - - image: cimg/python:3.7.12 - environment: - OMP_NUM_THREADS: 1 - TRANSFORMERS_IS_CI: yes - PYTEST_TIMEOUT: 120 - resource_class: xlarge - parallelism: 1 - steps: - - checkout + if [ ! -s test_preparation/test_list.txt ]; then + echo "No tests to run, exiting early!" + circleci-agent step halt + fi - restore_cache: keys: - v0.5-torch-{{ checksum "setup.py" }} @@ -1130,20 +741,49 @@ workflows: jobs: - check_code_quality - check_repository_consistency - - run_examples_torch - - run_examples_tensorflow - - run_examples_flax - - run_tests_custom_tokenizers - - run_tests_torch_and_tf - - run_tests_torch_and_flax - - run_tests_torch - - run_tests_tf - - run_tests_flax - - run_tests_pipelines_torch - - run_tests_pipelines_tf - - run_tests_onnxruntime - - run_tests_hub - - run_tests_layoutlmv2_and_v3 + - fetch_tests + - run_examples_torch: + requires: + - fetch_tests + - run_examples_tensorflow: + requires: + - fetch_tests + - run_examples_flax: + requires: + - fetch_tests + - run_tests_custom_tokenizers: + requires: + - fetch_tests + - run_tests_torch_and_tf: + requires: + - fetch_tests + - run_tests_torch_and_flax: + requires: + - fetch_tests + - run_tests_torch: + requires: + - fetch_tests + - run_tests_tf: + requires: + - fetch_tests + - run_tests_flax: + requires: + - fetch_tests + - run_tests_pipelines_torch: + requires: + - fetch_tests + - run_tests_pipelines_tf: + requires: + - fetch_tests + - run_tests_onnxruntime: + requires: + - fetch_tests + - run_tests_hub: + requires: + - fetch_tests + - run_tests_layoutlmv2_and_v3: + requires: + - fetch_tests nightly: triggers: - schedule: @@ -1153,20 +793,49 @@ workflows: only: - main jobs: - - run_examples_torch_all - - run_examples_tensorflow_all - - run_examples_flax_all - - run_tests_custom_tokenizers_all - - run_tests_torch_and_tf_all - - run_tests_torch_and_flax_all - - run_tests_torch_all - - run_tests_tf_all - - run_tests_flax_all - - run_tests_pipelines_torch_all - - run_tests_pipelines_tf_all - - run_tests_onnxruntime_all - - run_tests_hub_all - - run_tests_layoutlmv2_and_v3_all + - fetch_all_tests + - run_examples_torch: + requires: + - fetch_all_tests + - run_examples_tensorflow: + requires: + - fetch_all_tests + - run_examples_flax: + requires: + - fetch_all_tests + - run_tests_custom_tokenizers: + requires: + - fetch_all_tests + - run_tests_torch_and_tf: + requires: + - fetch_all_tests + - run_tests_torch_and_flax: + requires: + - fetch_all_tests + - run_tests_torch: + requires: + - fetch_all_tests + - run_tests_tf: + requires: + - fetch_all_tests + - run_tests_flax: + requires: + - fetch_all_tests + - run_tests_pipelines_torch: + requires: + - fetch_all_tests + - run_tests_pipelines_tf: + requires: + - fetch_all_tests + - run_tests_onnxruntime: + requires: + - fetch_all_tests + - run_tests_hub: + requires: + - fetch_all_tests + - run_tests_layoutlmv2_and_v3: + requires: + - fetch_all_tests # tpu_testing_jobs: # triggers: diff --git a/setup.py b/setup.py index d08f9153826573..27ab6efd69ef1e 100644 --- a/setup.py +++ b/setup.py @@ -236,7 +236,6 @@ def run(self): extras = {} -extras["blob"] = [] extras["ja"] = deps_list("fugashi", "ipadic", "unidic_lite", "unidic") extras["sklearn"] = deps_list("scikit-learn") From bc5d0b104647eb2942c8679d955196d7624937db Mon Sep 17 00:00:00 2001 From: Omar Sanseviero Date: Fri, 16 Sep 2022 15:40:38 +0200 Subject: [PATCH 319/539] Automatically tag CLIP repos as zero-shot-image-classification (#19064) * Add CLIP to zero-shot-image-classification * Make mapping private as it's not used for AutoClassing --- src/transformers/models/auto/modeling_auto.py | 7 +++++++ utils/update_metadata.py | 5 +++++ 2 files changed, 12 insertions(+) diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index caa4c9d4dffd66..7f4968d03cdf66 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -771,6 +771,13 @@ ] ) +_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict( + [ + # Model for Zero Shot Image Classification mapping + ("clip", "CLIPModel"), + ] +) + MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_MAPPING_NAMES) MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES) MODEL_WITH_LM_HEAD_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_WITH_LM_HEAD_MAPPING_NAMES) diff --git a/utils/update_metadata.py b/utils/update_metadata.py index 5ccd07aeb70c40..aaf296c0436b08 100644 --- a/utils/update_metadata.py +++ b/utils/update_metadata.py @@ -91,6 +91,11 @@ "AutoModelForVisualQuestionAnswering", ), ("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"), + ( + "zero-shot-image-classification", + "_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES", + "AutoModel", + ), ] From 70ba10e6d4684f56c50b740793498d4d2dd8a7bd Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Fri, 16 Sep 2022 16:23:58 +0200 Subject: [PATCH 320/539] Fix `LeViT` checkpoint (#19069) Co-authored-by: ydshieh --- src/transformers/models/levit/configuration_levit.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/transformers/models/levit/configuration_levit.py b/src/transformers/models/levit/configuration_levit.py index 38bf7c2d50f650..b0258a56c2d75b 100644 --- a/src/transformers/models/levit/configuration_levit.py +++ b/src/transformers/models/levit/configuration_levit.py @@ -37,7 +37,7 @@ class LevitConfig(PretrainedConfig): This is the configuration class to store the configuration of a [`LevitModel`]. It is used to instantiate a LeViT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the LeViT - [facebook/levit-base-192](https://huggingface.co/facebook/levit-base-192) architecture. + [facebook/levit-128S](https://huggingface.co/facebook/levit-128S) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. @@ -78,10 +78,10 @@ class LevitConfig(PretrainedConfig): ```python >>> from transformers import LevitModel, LevitConfig - >>> # Initializing a LeViT levit-base-192 style configuration + >>> # Initializing a LeViT levit-128S style configuration >>> configuration = LevitConfig() - >>> # Initializing a model from the levit-base-192 style configuration + >>> # Initializing a model from the levit-128S style configuration >>> model = LevitModel(configuration) >>> # Accessing the model configuration From 658010c739f61f5449e893954a14379923ebf387 Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Fri, 16 Sep 2022 16:38:08 +0100 Subject: [PATCH 321/539] TF: tests for (de)serializable models with resized tokens (#19013) * resized models that we can actually load * separate embeddings check * add test for embeddings out of bounds * add fake slows --- src/transformers/modeling_tf_utils.py | 37 ++++++++++- .../models/bart/modeling_tf_bart.py | 28 ++++++++- .../vit_mae/test_modeling_tf_vit_mae.py | 1 + tests/test_modeling_tf_common.py | 61 ++++++++++++++++++- 4 files changed, 123 insertions(+), 4 deletions(-) diff --git a/src/transformers/modeling_tf_utils.py b/src/transformers/modeling_tf_utils.py index bbcbf125d8d4f1..a90d1f0ebe49cf 100644 --- a/src/transformers/modeling_tf_utils.py +++ b/src/transformers/modeling_tf_utils.py @@ -1861,7 +1861,7 @@ def _v2_resize_token_embeddings(self, new_num_tokens): # If word embeddings are not tied, make sure that lm head bias is resized as well if self.get_bias() is not None: old_lm_head_bias = self.get_bias() - new_lm_head_bias = self._get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens) + new_lm_head_bias = self._v2_get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens) self.set_bias(new_lm_head_bias) # If word embeddings are not tied, make sure that lm head decoder is resized as well. @@ -1891,6 +1891,7 @@ def _get_resized_lm_head_bias(self, old_lm_head_bias, new_num_tokens): Return: `tf.Variable`: Pointer to the resized bias. """ + # TODO (joao): flagged for replacement (by `_v2_get_resized_lm_head_bias`) due to embeddings refactor new_lm_head_bias = {} for attr, weight in old_lm_head_bias.items(): @@ -1926,6 +1927,40 @@ def _get_resized_lm_head_bias(self, old_lm_head_bias, new_num_tokens): return new_lm_head_bias + def _v2_get_resized_lm_head_bias( + self, old_lm_head_bias: Dict[str, tf.Variable], new_num_tokens: int + ) -> Dict[str, tf.Tensor]: + """ + Build a resized bias from the old ones. Increasing the size will add newly initialized vectors at the end. + Reducing the size will remove vectors from the end + + Args: + old_lm_head_bias (`Dict[str, tf.Variable]`): + Old lm head bias to be resized. + new_num_tokens (`int`): + New number of tokens in the linear matrix. Increasing the size will add newly initialized vectors at + the end. Reducing the size will remove vectors from the end. + + Return: + `tf.Tensor`: Values for the resized bias. + """ + new_lm_head_bias = {} + + for attr, weight in old_lm_head_bias.items(): + # Determine the size difference (depending on the shape) + first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight) + size_diff = new_num_tokens - old_num_tokens + + # Copy the old bias values to the new bias + if old_num_tokens > new_num_tokens: + new_bias = weight.value()[..., :new_num_tokens] + else: + padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]] + new_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape)) + + new_lm_head_bias[attr] = new_bias + return new_lm_head_bias + def _get_resized_lm_head_decoder(self, old_lm_head_decoder, new_num_tokens): """ Build a resized decoder from the old ones. Increasing the size will add newly initialized vectors at the end. diff --git a/src/transformers/models/bart/modeling_tf_bart.py b/src/transformers/models/bart/modeling_tf_bart.py index f4e9532817cdf6..cbda2dd27b3d8a 100644 --- a/src/transformers/models/bart/modeling_tf_bart.py +++ b/src/transformers/models/bart/modeling_tf_bart.py @@ -746,6 +746,16 @@ def call( else: context_manager = nullcontext() with context_manager: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})" + ), + ) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) @@ -940,6 +950,16 @@ def call( else: context_manager = nullcontext() with context_manager: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})" + ), + ) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale hidden_states = inputs_embeds @@ -1263,7 +1283,6 @@ def __init__(self, config, load_weight_prefix=None, *inputs, **kwargs): self.bias_layer = BiasLayer( name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False ) - self.final_logits_bias = self.bias_layer.bias # alias to keep the same interface with PT def get_decoder(self): return self.model.decoder @@ -1281,7 +1300,12 @@ def get_bias(self): return {"final_logits_bias": self.bias_layer.bias} def set_bias(self, value): - self.bias_layer.bias = value["final_logits_bias"] + # Replaces the existing layers containing bias for correct (de)serialization. + vocab_size = value["final_logits_bias"].shape[-1] + self.bias_layer = BiasLayer( + name="final_logits_bias", shape=[1, vocab_size], initializer="zeros", trainable=False + ) + self.bias_layer.bias.assign(value["final_logits_bias"]) @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) diff --git a/tests/models/vit_mae/test_modeling_tf_vit_mae.py b/tests/models/vit_mae/test_modeling_tf_vit_mae.py index f05ecaf69c7afb..3bc582cb1fcd07 100644 --- a/tests/models/vit_mae/test_modeling_tf_vit_mae.py +++ b/tests/models/vit_mae/test_modeling_tf_vit_mae.py @@ -375,6 +375,7 @@ def test_keras_save_load(self): # overwrite from common since TFViTMAEForPretraining has random masking, we need to fix the noise # to generate masks during test + @slow def test_save_load(self): # make mask reproducible np.random.seed(2) diff --git a/tests/test_modeling_tf_common.py b/tests/test_modeling_tf_common.py index 0c55b4d8ed31bf..620d84083ea524 100644 --- a/tests/test_modeling_tf_common.py +++ b/tests/test_modeling_tf_common.py @@ -1162,7 +1162,7 @@ def _get_word_embedding_weight(model, embedding_layer): for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10, None]: # build the embeddings - model = model_class(config=config) + model = model_class(config=copy.deepcopy(config)) # `resize_token_embeddings` mutates `config` old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings()) old_bias = model.get_bias() old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings()) @@ -1203,6 +1203,65 @@ def _get_word_embedding_weight(model, embedding_layer): models_equal = False self.assertTrue(models_equal) + # TODO (Joao): this test is not slow, but it's tagged as such to keep track of failures on the scheduled CI runs, + # while passing push CI. Fix the underlying issues and remove the tag. + @slow + def test_save_load_after_resize_token_embeddings(self): + if not self.test_resize_embeddings: + return + config, original_inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + # create a model with resized (expended) embeddings + new_tokens_size = 10 + old_total_size = config.vocab_size + new_total_size = old_total_size + new_tokens_size + model = model_class(config=copy.deepcopy(config)) # `resize_token_embeddings` mutates `config` + model(model.dummy_inputs) # builds the embeddings layer + model.resize_token_embeddings(new_total_size) + + # fetch the output for an input exclusively made of new members of the vocabulary + inputs_dict = copy.deepcopy(original_inputs_dict) + new_vocab_input_ids = ids_tensor(inputs_dict["input_ids"].shape, new_tokens_size) + new_vocab_input_ids += old_total_size + if "input_ids" in inputs_dict: + inputs_dict["input_ids"] = new_vocab_input_ids + if "decoder_input_ids" in inputs_dict: + inputs_dict["decoder_input_ids"] = new_vocab_input_ids + prepared_inputs = self._prepare_for_class(inputs_dict, model_class) + outputs = model(**prepared_inputs) + + # save and load the model + with tempfile.TemporaryDirectory() as tmpdirname: + model.save_pretrained(tmpdirname, saved_model=False) + model = model_class.from_pretrained(tmpdirname) + restored_model_outputs = model(**prepared_inputs) + + # check that the output for the restored model is the same + self.assert_outputs_same(restored_model_outputs, outputs) + + @unittest.skipIf( + not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0, + reason="This test always passes on CPU.", + ) + def test_embeddings_out_of_bounds_raise_exception(self): + # TF embeddings layers don't raise an exception when an index is out of bounds on GPU, so we manually raise it. + # This test should only fail on GPU for models where we haven't added the safety check. + if not self.test_resize_embeddings: + return + config, original_inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config=config) + inputs_dict = copy.deepcopy(original_inputs_dict) + if "input_ids" in inputs_dict: + inputs_dict["input_ids"] = inputs_dict["input_ids"] * int(1e9) + if "decoder_input_ids" in inputs_dict: + inputs_dict["decoder_input_ids"] = inputs_dict["decoder_input_ids"] * int(1e9) + prepared_inputs = self._prepare_for_class(inputs_dict, model_class) + with self.assertRaises(tf.errors.InvalidArgumentError): + model(**prepared_inputs) + def test_lm_head_model_random_no_beam_search_generate(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict.get("input_ids", None) From 5e636eee4af48ccd03b4d9c1a1e6f7a1b92a643f Mon Sep 17 00:00:00 2001 From: Partho Date: Fri, 16 Sep 2022 22:29:40 +0530 Subject: [PATCH 322/539] Add type hints for PyTorch UniSpeech, MPNet and Nystromformer (#19039) * added type hints pytorch unispeech * added type hints pytorch MPNet * added type hints nystromformer * resolved copy inconsistencies * make fix-copies Co-authored-by: matt --- .../models/data2vec/modeling_data2vec_audio.py | 10 +++++----- .../models/hubert/modeling_hubert.py | 17 +++++++++++------ src/transformers/models/mpnet/modeling_mpnet.py | 12 ++++++------ .../nystromformer/modeling_nystromformer.py | 12 ++++++------ .../models/unispeech/modeling_unispeech.py | 17 +++++++++++------ .../unispeech_sat/modeling_unispeech_sat.py | 17 +++++++++++------ .../models/wav2vec2/modeling_wav2vec2.py | 17 +++++++++++------ 7 files changed, 61 insertions(+), 41 deletions(-) diff --git a/src/transformers/models/data2vec/modeling_data2vec_audio.py b/src/transformers/models/data2vec/modeling_data2vec_audio.py index 70d802a80154b8..7636502cddc9f4 100755 --- a/src/transformers/models/data2vec/modeling_data2vec_audio.py +++ b/src/transformers/models/data2vec/modeling_data2vec_audio.py @@ -563,11 +563,11 @@ def __init__(self, config): def forward( self, - hidden_states, - attention_mask=None, - output_attentions=False, - output_hidden_states=False, - return_dict=True, + hidden_states: torch.tensor, + attention_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None diff --git a/src/transformers/models/hubert/modeling_hubert.py b/src/transformers/models/hubert/modeling_hubert.py index d6cb6b8e059920..c6444d48e85a22 100755 --- a/src/transformers/models/hubert/modeling_hubert.py +++ b/src/transformers/models/hubert/modeling_hubert.py @@ -618,7 +618,12 @@ def __init__(self, config): self.feed_forward = HubertFeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - def forward(self, hidden_states, attention_mask=None, output_attentions=False): + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ): attn_residual = hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states, attn_weights, _ = self.attention( @@ -649,11 +654,11 @@ def __init__(self, config): def forward( self, - hidden_states, - attention_mask=None, - output_attentions=False, - output_hidden_states=False, - return_dict=True, + hidden_states: torch.tensor, + attention_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None diff --git a/src/transformers/models/mpnet/modeling_mpnet.py b/src/transformers/models/mpnet/modeling_mpnet.py index e7977561fe2b1a..941804ee764b9a 100644 --- a/src/transformers/models/mpnet/modeling_mpnet.py +++ b/src/transformers/models/mpnet/modeling_mpnet.py @@ -323,12 +323,12 @@ def __init__(self, config): def forward( self, - hidden_states, - attention_mask=None, - head_mask=None, - output_attentions=False, - output_hidden_states=False, - return_dict=False, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = False, **kwargs, ): position_bias = self.compute_position_bias(hidden_states) diff --git a/src/transformers/models/nystromformer/modeling_nystromformer.py b/src/transformers/models/nystromformer/modeling_nystromformer.py index e2a41a8616588a..c622c2345943ac 100755 --- a/src/transformers/models/nystromformer/modeling_nystromformer.py +++ b/src/transformers/models/nystromformer/modeling_nystromformer.py @@ -354,12 +354,12 @@ def __init__(self, config): def forward( self, - hidden_states, - attention_mask=None, - head_mask=None, - output_attentions=False, - output_hidden_states=False, - return_dict=True, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None diff --git a/src/transformers/models/unispeech/modeling_unispeech.py b/src/transformers/models/unispeech/modeling_unispeech.py index dc194318e9992d..e675718e2ebf94 100755 --- a/src/transformers/models/unispeech/modeling_unispeech.py +++ b/src/transformers/models/unispeech/modeling_unispeech.py @@ -655,7 +655,12 @@ def __init__(self, config): self.feed_forward = UniSpeechFeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - def forward(self, hidden_states, attention_mask=None, output_attentions=False): + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ): attn_residual = hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states, attn_weights, _ = self.attention( @@ -686,11 +691,11 @@ def __init__(self, config): def forward( self, - hidden_states, - attention_mask=None, - output_attentions=False, - output_hidden_states=False, - return_dict=True, + hidden_states: torch.tensor, + attention_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None diff --git a/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py b/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py index 926464d3bf8e8b..b3b0da824f00b7 100755 --- a/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py +++ b/src/transformers/models/unispeech_sat/modeling_unispeech_sat.py @@ -669,7 +669,12 @@ def __init__(self, config): self.feed_forward = UniSpeechSatFeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - def forward(self, hidden_states, attention_mask=None, output_attentions=False): + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ): attn_residual = hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states, attn_weights, _ = self.attention( @@ -700,11 +705,11 @@ def __init__(self, config): def forward( self, - hidden_states, - attention_mask=None, - output_attentions=False, - output_hidden_states=False, - return_dict=True, + hidden_states: torch.tensor, + attention_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None diff --git a/src/transformers/models/wav2vec2/modeling_wav2vec2.py b/src/transformers/models/wav2vec2/modeling_wav2vec2.py index e1676399c14d08..7feb7790dc395d 100755 --- a/src/transformers/models/wav2vec2/modeling_wav2vec2.py +++ b/src/transformers/models/wav2vec2/modeling_wav2vec2.py @@ -704,7 +704,12 @@ def __init__(self, config): self.feed_forward = Wav2Vec2FeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) - def forward(self, hidden_states, attention_mask=None, output_attentions=False): + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ): attn_residual = hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states, attn_weights, _ = self.attention( @@ -734,11 +739,11 @@ def __init__(self, config): def forward( self, - hidden_states, - attention_mask=None, - output_attentions=False, - output_hidden_states=False, - return_dict=True, + hidden_states: torch.tensor, + attention_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None From 773314ab8070db69cdd07d615108b83259e7415d Mon Sep 17 00:00:00 2001 From: fxmarty <9808326+fxmarty@users.noreply.github.com> Date: Fri, 16 Sep 2022 21:01:57 +0200 Subject: [PATCH 323/539] replace logger.warn by logger.warning (#19068) --- src/transformers/commands/pt_to_tf.py | 10 +++++----- src/transformers/utils/hub.py | 6 +++--- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/transformers/commands/pt_to_tf.py b/src/transformers/commands/pt_to_tf.py index c126eeefcc063f..34818ca2ad0ccc 100644 --- a/src/transformers/commands/pt_to_tf.py +++ b/src/transformers/commands/pt_to_tf.py @@ -257,11 +257,11 @@ def run(self): if architectures is None: # No architecture defined -- use auto classes pt_class = getattr(import_module("transformers"), "AutoModel") tf_class = getattr(import_module("transformers"), "TFAutoModel") - self._logger.warn("No detected architecture, using AutoModel/TFAutoModel") + self._logger.warning("No detected architecture, using AutoModel/TFAutoModel") else: # Architecture defined -- use it if len(architectures) > 1: raise ValueError(f"More than one architecture was found, aborting. (architectures = {architectures})") - self._logger.warn(f"Detected architecture: {architectures[0]}") + self._logger.warning(f"Detected architecture: {architectures[0]}") pt_class = getattr(import_module("transformers"), architectures[0]) try: tf_class = getattr(import_module("transformers"), "TF" + architectures[0]) @@ -336,9 +336,9 @@ def run(self): repo.git_add(auto_lfs_track=True) repo.git_commit(commit_message) repo.git_push(blocking=True) # this prints a progress bar with the upload - self._logger.warn(f"TF weights pushed into {self._model_name}") + self._logger.warning(f"TF weights pushed into {self._model_name}") elif not self._no_pr: - self._logger.warn("Uploading the weights into a new PR...") + self._logger.warning("Uploading the weights into a new PR...") commit_descrition = ( "Model converted by the [`transformers`' `pt_to_tf`" " CLI](https://github.com/huggingface/transformers/blob/main/src/transformers/commands/pt_to_tf.py). " @@ -375,4 +375,4 @@ def run(self): repo_type="model", create_pr=True, ) - self._logger.warn(f"PR open in {hub_pr_url}") + self._logger.warning(f"PR open in {hub_pr_url}") diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py index ed2271119e522f..de6fab51b8f1cd 100644 --- a/src/transformers/utils/hub.py +++ b/src/transformers/utils/hub.py @@ -1107,14 +1107,14 @@ def move_cache(cache_dir=None, new_cache_dir=None, token=None): if cache_version < 1: if is_offline_mode(): - logger.warn( + logger.warning( "You are offline and the cache for model files in Transformers v4.22.0 has been updated while your local " "cache seems to be the one of a previous version. It is very likely that all your calls to any " "`from_pretrained()` method will fail. Remove the offline mode and enable internet connection to have " "your cache be updated automatically, then you can go back to offline mode." ) else: - logger.warn( + logger.warning( "The cache for model files in Transformers v4.22.0 has been updated. Migrating your old cache. This is a " "one-time only operation. You can interrupt this and resume the migration later on by calling " "`transformers.utils.move_cache()`." @@ -1138,7 +1138,7 @@ def move_cache(cache_dir=None, new_cache_dir=None, token=None): with open(cache_version_file, "w") as f: f.write("1") except Exception: - logger.warn( + logger.warning( f"There was a problem when trying to write in your cache folder ({TRANSFORMERS_CACHE}). You should set " "the environment variable TRANSFORMERS_CACHE to a writable directory." ) From 9017ba4ca40d10dc38940545a7b3ac76d89ba8e8 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Fri, 16 Sep 2022 16:11:47 -0400 Subject: [PATCH 324/539] Fix tokenizer load from one file (#19073) * Fix tokenizer load from one file * Add a test * Style Co-authored-by: Lysandre --- src/transformers/tokenization_utils_base.py | 2 ++ tests/test_tokenization_common.py | 11 +++++++++++ 2 files changed, 13 insertions(+) diff --git a/src/transformers/tokenization_utils_base.py b/src/transformers/tokenization_utils_base.py index 5062a7bfb99991..2e7ac0be0fb29a 100644 --- a/src/transformers/tokenization_utils_base.py +++ b/src/transformers/tokenization_utils_base.py @@ -1726,6 +1726,8 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], for file_id, file_path in vocab_files.items(): if file_path is None: resolved_vocab_files[file_id] = None + elif os.path.isfile(file_path): + resolved_vocab_files[file_id] = file_path elif is_remote_url(file_path): resolved_vocab_files[file_id] = download_url(file_path, proxies=proxies) else: diff --git a/tests/test_tokenization_common.py b/tests/test_tokenization_common.py index ce04fa3f842581..ef6eb421b44225 100644 --- a/tests/test_tokenization_common.py +++ b/tests/test_tokenization_common.py @@ -31,6 +31,7 @@ from typing import TYPE_CHECKING, Any, Dict, List, Tuple, Union from huggingface_hub import HfFolder, delete_repo, set_access_token +from huggingface_hub.file_download import http_get from parameterized import parameterized from requests.exceptions import HTTPError from transformers import ( @@ -3889,6 +3890,16 @@ def test_cached_files_are_used_when_internet_is_down(self): # This check we did call the fake head request mock_head.assert_called() + def test_legacy_load_from_one_file(self): + try: + tmp_file = tempfile.mktemp() + with open(tmp_file, "wb") as f: + http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f) + + AlbertTokenizer.from_pretrained(tmp_file) + finally: + os.remove(tmp_file) + @is_staging_test class TokenizerPushToHubTester(unittest.TestCase): From 56c548f17ca3dee50131f02490e10070b0bd0e76 Mon Sep 17 00:00:00 2001 From: Lysandre Debut Date: Fri, 16 Sep 2022 16:12:59 -0400 Subject: [PATCH 325/539] Note about developer mode (#19075) --- README.md | 2 ++ README_ko.md | 16 ++++++++-------- README_zh-hans.md | 16 ++++++++-------- README_zh-hant.md | 16 ++++++++-------- 4 files changed, 26 insertions(+), 24 deletions(-) diff --git a/README.md b/README.md index 6071d7885d0218..31e4c5af04567d 100644 --- a/README.md +++ b/README.md @@ -249,6 +249,8 @@ conda install -c huggingface transformers Follow the installation pages of Flax, PyTorch or TensorFlow to see how to install them with conda. +> **_NOTE:_** On Windows, you may be prompted to activate Developer Mode in order to benefit from caching. If this is not an option for you, please let us know in [this issue](https://github.com/huggingface/huggingface_hub/issues/1062). + ## Model architectures **[All the model checkpoints](https://huggingface.co/models)** provided by 🤗 Transformers are seamlessly integrated from the huggingface.co [model hub](https://huggingface.co) where they are uploaded directly by [users](https://huggingface.co/users) and [organizations](https://huggingface.co/organizations). diff --git a/README_ko.md b/README_ko.md index 2a42664e20d771..a4ccc124acc2ff 100644 --- a/README_ko.md +++ b/README_ko.md @@ -237,18 +237,18 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[DeBERTa](https://huggingface.co/docs/transformers/model_doc/deberta)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. 1. **[DeBERTa-v2](https://huggingface.co/docs/transformers/model_doc/deberta-v2)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. 1. **[Decision Transformer](https://huggingface.co/docs/transformers/model_doc/decision_transformer)** (from Berkeley/Facebook/Google) released with the paper [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) by Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch. -1. **[Deformable DETR](https://huggingface.co/docs/transformers/main/model_doc/deformable_detr)** (from SenseTime Research) released with the paper [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) by Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, Jifeng Dai. +1. **[Deformable DETR](https://huggingface.co/docs/transformers/model_doc/deformable_detr)** (from SenseTime Research) released with the paper [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) by Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, Jifeng Dai. 1. **[DeiT](https://huggingface.co/docs/transformers/model_doc/deit)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou. 1. **[DETR](https://huggingface.co/docs/transformers/model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko. 1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan. 1. **[DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German version of DistilBERT. 1. **[DiT](https://huggingface.co/docs/transformers/model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei. -1. **[Donut](https://huggingface.co/docs/transformers/main/model_doc/donut)** (from NAVER) released with the paper [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) by Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park. +1. **[Donut](https://huggingface.co/docs/transformers/model_doc/donut)** (from NAVER) released with the paper [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) by Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park. 1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun. 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. 1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. -1. **[ERNIE](https://huggingface.co/docs/transformers/main/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu. +1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu. 1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab. 1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. 1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon. @@ -257,7 +257,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever. 1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy. 1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach -1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/main/model_doc/gpt_neox_japanese)** (from ABEJA) released by Shinya Otani, Takayoshi Makabe, Anuj Arora, and Kyo Hattori. +1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/model_doc/gpt_neox_japanese)** (from ABEJA) released by Shinya Otani, Takayoshi Makabe, Anuj Arora, and Kyo Hattori. 1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**. 1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (from EleutherAI) released in the repository [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki. 1. **[GroupViT](https://huggingface.co/docs/transformers/model_doc/groupvit)** (from UCSD, NVIDIA) released with the paper [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) by Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang. @@ -294,7 +294,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al. 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu. -1. **[PEGASUS-X](https://huggingface.co/docs/transformers/main/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, Peter J. Liu. +1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, Peter J. Liu. 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira. 1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen. 1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang. @@ -317,7 +317,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[Splinter](https://huggingface.co/docs/transformers/model_doc/splinter)** (from Tel Aviv University), released together with the paper [Few-Shot Question Answering by Pretraining Span Selection](https://arxiv.org/abs/2101.00438) by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy. 1. **[SqueezeBERT](https://huggingface.co/docs/transformers/model_doc/squeezebert)** (from Berkeley) released with the paper [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer. 1. **[Swin Transformer](https://huggingface.co/docs/transformers/model_doc/swin)** (from Microsoft) released with the paper [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo. -1. **[Swin Transformer V2](https://huggingface.co/docs/transformers/main/model_doc/swinv2)** (from Microsoft) released with the paper [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883) by Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo. +1. **[Swin Transformer V2](https://huggingface.co/docs/transformers/model_doc/swinv2)** (from Microsoft) released with the paper [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883) by Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo. 1. **[T5](https://huggingface.co/docs/transformers/model_doc/t5)** (from Google AI) released with the paper [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu. 1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu. 1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos. @@ -329,7 +329,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang. 1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu. 1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu. -1. **[VideoMAE](https://huggingface.co/docs/transformers/main/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang. +1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang. 1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim. 1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby. 1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang. @@ -338,7 +338,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino. 1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli. 1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei. -1. **[X-CLIP](https://huggingface.co/docs/transformers/main/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling. +1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling. 1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li. 1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau. 1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou. diff --git a/README_zh-hans.md b/README_zh-hans.md index 7d0642cc9abff3..34839f54a3a808 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -261,18 +261,18 @@ conda install -c huggingface transformers 1. **[DeBERTa](https://huggingface.co/docs/transformers/model_doc/deberta)** (来自 Microsoft) 伴随论文 [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) 由 Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen 发布。 1. **[DeBERTa-v2](https://huggingface.co/docs/transformers/model_doc/deberta-v2)** (来自 Microsoft) 伴随论文 [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) 由 Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen 发布。 1. **[Decision Transformer](https://huggingface.co/docs/transformers/model_doc/decision_transformer)** (来自 Berkeley/Facebook/Google) 伴随论文 [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) 由 Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch 发布。 -1. **[Deformable DETR](https://huggingface.co/docs/transformers/main/model_doc/deformable_detr)** (来自 SenseTime Research) 伴随论文 [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) 由 Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, Jifeng Dai 发布。 +1. **[Deformable DETR](https://huggingface.co/docs/transformers/model_doc/deformable_detr)** (来自 SenseTime Research) 伴随论文 [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) 由 Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, Jifeng Dai 发布。 1. **[DeiT](https://huggingface.co/docs/transformers/model_doc/deit)** (来自 Facebook) 伴随论文 [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) 由 Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou 发布。 1. **[DETR](https://huggingface.co/docs/transformers/model_doc/detr)** (来自 Facebook) 伴随论文 [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) 由 Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko 发布。 1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (来自 Microsoft Research) 伴随论文 [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) 由 Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan 发布。 1. **[DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert)** (来自 HuggingFace), 伴随论文 [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 同样的方法也应用于压缩 GPT-2 到 [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa 到 [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation), Multilingual BERT 到 [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) 和德语版 DistilBERT。 1. **[DiT](https://huggingface.co/docs/transformers/model_doc/dit)** (来自 Microsoft Research) 伴随论文 [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) 由 Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei 发布。 -1. **[Donut](https://huggingface.co/docs/transformers/main/model_doc/donut)** (来自 NAVER) 伴随论文 [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) 由 Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park 发布。 +1. **[Donut](https://huggingface.co/docs/transformers/model_doc/donut)** (来自 NAVER) 伴随论文 [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) 由 Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park 发布。 1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (来自 Facebook) 伴随论文 [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) 由 Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih 发布。 1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (来自 Intel Labs) 伴随论文 [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) 由 René Ranftl, Alexey Bochkovskiy, Vladlen Koltun 发布。 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (来自 Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning 发布。 1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (来自 Google Research) 伴随论文 [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) 由 Sascha Rothe, Shashi Narayan, Aliaksei Severyn 发布。 -1. **[ERNIE](https://huggingface.co/docs/transformers/main/model_doc/ernie)** (来自 Baidu) 伴随论文 [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu 发布。 +1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (来自 Baidu) 伴随论文 [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu 发布。 1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (来自 CNRS) 伴随论文 [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) 由 Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab 发布。 1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (来自 Facebook AI) 伴随论文 [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) 由 Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela 发布。 1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (来自 Google Research) 伴随论文 [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) 由 James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon 发布。 @@ -281,7 +281,7 @@ conda install -c huggingface transformers 1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (来自 OpenAI) 伴随论文 [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) 由 Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever 发布。 1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (来自 EleutherAI) 随仓库 [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) 发布。作者为 Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy 发布。 1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach -1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/main/model_doc/gpt_neox_japanese)** (来自 ABEJA) 由 Shinya Otani, Takayoshi Makabe, Anuj Arora, Kyo Hattori。 +1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/model_doc/gpt_neox_japanese)** (来自 ABEJA) 由 Shinya Otani, Takayoshi Makabe, Anuj Arora, Kyo Hattori。 1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (来自 OpenAI) 伴随论文 [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) 由 Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever** 发布。 1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (来自 EleutherAI) 伴随论文 [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) 由 Ben Wang and Aran Komatsuzaki 发布。 1. **[GroupViT](https://huggingface.co/docs/transformers/model_doc/groupvit)** (来自 UCSD, NVIDIA) 伴随论文 [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) 由 Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang 发布。 @@ -318,7 +318,7 @@ conda install -c huggingface transformers 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (来自 Meta AI) 伴随论文 [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) 由 Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al 发布。 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (来自 Google AI) 伴随论文 [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) 由 Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby 发布。 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (来自 Google) 伴随论文 [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) 由 Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu 发布。 -1. **[PEGASUS-X](https://huggingface.co/docs/transformers/main/model_doc/pegasus_x)** (来自 Google) 伴随论文 [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) 由 Jason Phang, Yao Zhao, Peter J. Liu 发布。 +1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (来自 Google) 伴随论文 [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) 由 Jason Phang, Yao Zhao, Peter J. Liu 发布。 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (来自 Deepmind) 伴随论文 [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) 由 Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira 发布。 1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (来自 VinAI Research) 伴随论文 [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) 由 Dat Quoc Nguyen and Anh Tuan Nguyen 发布。 1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (来自 UCLA NLP) 伴随论文 [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) 由 Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang 发布。 @@ -341,7 +341,7 @@ conda install -c huggingface transformers 1. **[Splinter](https://huggingface.co/docs/transformers/model_doc/splinter)** (来自 Tel Aviv University) 伴随论文 [Few-Shot Question Answering by Pretraining Span Selection](https://arxiv.org/abs/2101.00438) 由 Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy 发布。 1. **[SqueezeBERT](https://huggingface.co/docs/transformers/model_doc/squeezebert)** (来自 Berkeley) 伴随论文 [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) 由 Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer 发布。 1. **[Swin Transformer](https://huggingface.co/docs/transformers/model_doc/swin)** (来自 Microsoft) 伴随论文 [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) 由 Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo 发布。 -1. **[Swin Transformer V2](https://huggingface.co/docs/transformers/main/model_doc/swinv2)** (来自 Microsoft) 伴随论文 [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883) 由 Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo 发布。 +1. **[Swin Transformer V2](https://huggingface.co/docs/transformers/model_doc/swinv2)** (来自 Microsoft) 伴随论文 [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883) 由 Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo 发布。 1. **[T5](https://huggingface.co/docs/transformers/model_doc/t5)** (来自 Google AI) 伴随论文 [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) 由 Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu 发布。 1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (来自 Google AI) 伴随论文 [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) 由 Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu 发布。 1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (来自 Google AI) 伴随论文 [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) 由 Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos 发布。 @@ -353,7 +353,7 @@ conda install -c huggingface transformers 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (来自 Microsoft Research) 伴随论文 [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) 由 Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang 发布。 1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (来自 Microsoft Research) 伴随论文 [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) 由 Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu 发布。 1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (来自 Tsinghua University and Nankai University) 伴随论文 [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) 由 Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu 发布。 -1. **[VideoMAE](https://huggingface.co/docs/transformers/main/model_doc/videomae)** (来自 Multimedia Computing Group, Nanjing University) 伴随论文 [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) 由 Zhan Tong, Yibing Song, Jue Wang, Limin Wang 发布。 +1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (来自 Multimedia Computing Group, Nanjing University) 伴随论文 [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) 由 Zhan Tong, Yibing Song, Jue Wang, Limin Wang 发布。 1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (来自 NAVER AI Lab/Kakao Enterprise/Kakao Brain) 伴随论文 [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) 由 Wonjae Kim, Bokyung Son, Ildoo Kim 发布。 1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (来自 Google AI) 伴随论文 [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) 由 Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby 发布。 1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (来自 UCLA NLP) 伴随论文 [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) 由 Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang 发布。 @@ -362,7 +362,7 @@ conda install -c huggingface transformers 1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (来自 Facebook AI) 伴随论文 [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) 由 Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino 发布。 1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (来自 Facebook AI) 伴随论文 [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) 由 Qiantong Xu, Alexei Baevski, Michael Auli 发布。 1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei. -1. **[X-CLIP](https://huggingface.co/docs/transformers/main/model_doc/xclip)** (来自 Microsoft Research) 伴随论文 [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) 由 Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling 发布。 +1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (来自 Microsoft Research) 伴随论文 [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) 由 Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling 发布。 1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li. 1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (来自 Facebook) 伴随论文 [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) 由 Guillaume Lample and Alexis Conneau 发布。 1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (来自 Microsoft Research) 伴随论文 [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) 由 Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index c4de5181002d73..9e6a4a0b0ecfe4 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -273,18 +273,18 @@ conda install -c huggingface transformers 1. **[DeBERTa](https://huggingface.co/docs/transformers/model_doc/deberta)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. 1. **[DeBERTa-v2](https://huggingface.co/docs/transformers/model_doc/deberta-v2)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. 1. **[Decision Transformer](https://huggingface.co/docs/transformers/model_doc/decision_transformer)** (from Berkeley/Facebook/Google) released with the paper [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) by Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch. -1. **[Deformable DETR](https://huggingface.co/docs/transformers/main/model_doc/deformable_detr)** (from SenseTime Research) released with the paper [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) by Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, Jifeng Dai. +1. **[Deformable DETR](https://huggingface.co/docs/transformers/model_doc/deformable_detr)** (from SenseTime Research) released with the paper [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) by Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, Jifeng Dai. 1. **[DeiT](https://huggingface.co/docs/transformers/model_doc/deit)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou. 1. **[DETR](https://huggingface.co/docs/transformers/model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko. 1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan. 1. **[DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German version of DistilBERT. 1. **[DiT](https://huggingface.co/docs/transformers/model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei. -1. **[Donut](https://huggingface.co/docs/transformers/main/model_doc/donut)** (from NAVER) released with the paper [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) by Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park. +1. **[Donut](https://huggingface.co/docs/transformers/model_doc/donut)** (from NAVER) released with the paper [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) by Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park. 1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih. 1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun. 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. 1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. -1. **[ERNIE](https://huggingface.co/docs/transformers/main/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu. +1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu. 1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab. 1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. 1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon. @@ -293,7 +293,7 @@ conda install -c huggingface transformers 1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever. 1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy. 1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach -1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/main/model_doc/gpt_neox_japanese)** (from ABEJA) released by Shinya Otani, Takayoshi Makabe, Anuj Arora, and Kyo Hattori. +1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/model_doc/gpt_neox_japanese)** (from ABEJA) released by Shinya Otani, Takayoshi Makabe, Anuj Arora, and Kyo Hattori. 1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**. 1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (from EleutherAI) released with the paper [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki. 1. **[GroupViT](https://huggingface.co/docs/transformers/model_doc/groupvit)** (from UCSD, NVIDIA) released with the paper [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) by Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang. @@ -330,7 +330,7 @@ conda install -c huggingface transformers 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al. 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu. -1. **[PEGASUS-X](https://huggingface.co/docs/transformers/main/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, Peter J. Liu. +1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, Peter J. Liu. 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira. 1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen. 1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang. @@ -353,7 +353,7 @@ conda install -c huggingface transformers 1. **[Splinter](https://huggingface.co/docs/transformers/model_doc/splinter)** (from Tel Aviv University) released with the paper [Few-Shot Question Answering by Pretraining Span Selection](https://arxiv.org/abs/2101.00438) by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy. 1. **[SqueezeBERT](https://huggingface.co/docs/transformers/model_doc/squeezebert)** (from Berkeley) released with the paper [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer. 1. **[Swin Transformer](https://huggingface.co/docs/transformers/model_doc/swin)** (from Microsoft) released with the paper [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo. -1. **[Swin Transformer V2](https://huggingface.co/docs/transformers/main/model_doc/swinv2)** (from Microsoft) released with the paper [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883) by Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo. +1. **[Swin Transformer V2](https://huggingface.co/docs/transformers/model_doc/swinv2)** (from Microsoft) released with the paper [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883) by Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo. 1. **[T5](https://huggingface.co/docs/transformers/model_doc/t5)** (from Google AI) released with the paper [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu. 1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released with the paper [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu. 1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos. @@ -365,7 +365,7 @@ conda install -c huggingface transformers 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang. 1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu. 1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu. -1. **[VideoMAE](https://huggingface.co/docs/transformers/main/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang. +1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang. 1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim. 1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby. 1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang. @@ -374,7 +374,7 @@ conda install -c huggingface transformers 1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino. 1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli. 1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei. -1. **[X-CLIP](https://huggingface.co/docs/transformers/main/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling. +1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling. 1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li. 1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau. 1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou. From 7d0486c106dc01a3abaf8588a7d0a0c453208954 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 16 Sep 2022 22:15:02 +0200 Subject: [PATCH 326/539] Bump mako in /examples/research_projects/decision_transformer (#19077) Bumps [mako](https://github.com/sqlalchemy/mako) from 1.2.0 to 1.2.2. - [Release notes](https://github.com/sqlalchemy/mako/releases) - [Changelog](https://github.com/sqlalchemy/mako/blob/main/CHANGES) - [Commits](https://github.com/sqlalchemy/mako/commits) --- updated-dependencies: - dependency-name: mako dependency-type: direct:production ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .../research_projects/decision_transformer/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/research_projects/decision_transformer/requirements.txt b/examples/research_projects/decision_transformer/requirements.txt index bf3dd4f1777f7c..6add82e1d5446f 100644 --- a/examples/research_projects/decision_transformer/requirements.txt +++ b/examples/research_projects/decision_transformer/requirements.txt @@ -104,7 +104,7 @@ kubernetes==12.0.1 libclang==13.0.0 librosa==0.9.1 llvmlite==0.38.0 -Mako==1.2.0 +Mako==1.2.2 Markdown==3.3.6 MarkupSafe==1.1.1 matplotlib==3.5.1 From ae219532e314be51e7a1cd1583f9ffc208a22979 Mon Sep 17 00:00:00 2001 From: flozi00 Date: Fri, 16 Sep 2022 22:16:00 +0200 Subject: [PATCH 327/539] german autoclass (#19049) * german autoclass * Update _toctree.yml --- docs/source/de/_toctree.yml | 2 + docs/source/de/autoclass_tutorial.mdx | 127 ++++++++++++++++++++++++++ 2 files changed, 129 insertions(+) create mode 100644 docs/source/de/autoclass_tutorial.mdx diff --git a/docs/source/de/_toctree.yml b/docs/source/de/_toctree.yml index 69f5df0a82e5b4..98b56cfa307628 100644 --- a/docs/source/de/_toctree.yml +++ b/docs/source/de/_toctree.yml @@ -9,4 +9,6 @@ - sections: - local: pipeline_tutorial title: Pipelines für Inferenzen + - local: autoclass_tutorial + title: Laden von vortrainierten Instanzen mit einer AutoClass title: Tutorials diff --git a/docs/source/de/autoclass_tutorial.mdx b/docs/source/de/autoclass_tutorial.mdx new file mode 100644 index 00000000000000..95247cd04ba0ce --- /dev/null +++ b/docs/source/de/autoclass_tutorial.mdx @@ -0,0 +1,127 @@ + + +# Vortrainierte Instanzen mit einer AutoClass laden + +Bei so vielen verschiedenen Transformator-Architekturen kann es eine Herausforderung sein, eine für Ihren Checkpoint zu erstellen. Als Teil der 🤗 Transformers Kernphilosophie, die Bibliothek leicht, einfach und flexibel nutzbar zu machen, leitet eine `AutoClass` automatisch die richtige Architektur aus einem gegebenen Checkpoint ab und lädt sie. Mit der Methode `from_pretrained()` kann man schnell ein vortrainiertes Modell für eine beliebige Architektur laden, so dass man keine Zeit und Ressourcen aufwenden muss, um ein Modell von Grund auf zu trainieren. Die Erstellung dieser Art von Checkpoint-agnostischem Code bedeutet, dass Ihr Code, wenn er für einen Checkpoint funktioniert, auch mit einem anderen Checkpoint funktionieren wird - solange er für eine ähnliche Aufgabe trainiert wurde - selbst wenn die Architektur unterschiedlich ist. + + + +Denken Sie daran, dass sich die Architektur auf das Skelett des Modells bezieht und die Checkpoints die Gewichte für eine bestimmte Architektur sind. Zum Beispiel ist [BERT](https://huggingface.co/bert-base-uncased) eine Architektur, während `bert-base-uncased` ein Checkpoint ist. Modell ist ein allgemeiner Begriff, der entweder Architektur oder Prüfpunkt bedeuten kann. + + + +In dieser Anleitung lernen Sie, wie man: + +* Einen vortrainierten Tokenizer lädt. +* Einen vortrainierten Merkmalsextraktor lädt. +* Einen vortrainierten Prozessor lädt. +* Ein vortrainiertes Modell lädt. + +## AutoTokenizer + +Nahezu jede NLP-Aufgabe beginnt mit einem Tokenizer. Ein Tokenizer wandelt Ihre Eingabe in ein Format um, das vom Modell verarbeitet werden kann. + +Laden Sie einen Tokenizer mit [`AutoTokenizer.from_pretrained`]: + +```py +>>> from transformers import AutoTokenizer + +>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") +``` + +Dann tokenisieren Sie Ihre Eingabe wie unten gezeigt: + +```py +>>> sequence = "In a hole in the ground there lived a hobbit." +>>> print(tokenizer(sequence)) +{'input_ids': [101, 1999, 1037, 4920, 1999, 1996, 2598, 2045, 2973, 1037, 7570, 10322, 4183, 1012, 102], + 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} +``` + +## AutoFeatureExtractor + +Für Audio- und Bildverarbeitungsaufgaben verarbeitet ein Merkmalsextraktor das Audiosignal oder Bild in das richtige Eingabeformat. + +Laden Sie einen Merkmalsextraktor mit [`AutoFeatureExtractor.from_pretrained`]: + +```py +>>> from transformers import AutoFeatureExtractor + +>>> feature_extractor = AutoFeatureExtractor.from_pretrained( +... "ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition" +... ) +``` + +## AutoProcessor + +Multimodale Aufgaben erfordern einen Prozessor, der zwei Arten von Vorverarbeitungswerkzeugen kombiniert. Das Modell [LayoutLMV2](model_doc/layoutlmv2) beispielsweise benötigt einen Feature-Extraktor für Bilder und einen Tokenizer für Text; ein Prozessor kombiniert beide. + +Laden Sie einen Prozessor mit [`AutoProcessor.from_pretrained`]: + +```py +>>> from transformers import AutoProcessor + +>>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased") +``` + +## AutoModel + + + +Mit den `AutoModelFor`-Klassen können Sie schließlich ein vortrainiertes Modell für eine bestimmte Aufgabe laden (siehe [hier](model_doc/auto) für eine vollständige Liste der verfügbaren Aufgaben). Laden Sie zum Beispiel ein Modell für die Sequenzklassifikation mit [`AutoModelForSequenceClassification.from_pretrained`]: + +```py +>>> from transformers import AutoModelForSequenceClassification + +>>> model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased") +``` + +Sie können denselben Prüfpunkt problemlos wiederverwenden, um eine Architektur für eine andere Aufgabe zu laden: + +```py +>>> from transformers import AutoModelForTokenClassification + +>>> model = AutoModelForTokenClassification.from_pretrained("distilbert-base-uncased") +``` + + + +Für PyTorch-Modelle verwendet die Methode `from_pretrained()` `torch.load()`, die intern `pickle` verwendet und als unsicher bekannt ist. Generell sollte man niemals ein Modell laden, das aus einer nicht vertrauenswürdigen Quelle stammen könnte, oder das manipuliert worden sein könnte. Dieses Sicherheitsrisiko wird für öffentliche Modelle, die auf dem Hugging Face Hub gehostet werden, teilweise gemildert, da diese bei jeder Übertragung [auf Malware](https://huggingface.co/docs/hub/security-malware) gescannt werden. Siehe die [Hub-Dokumentation](https://huggingface.co/docs/hub/security) für Best Practices wie [signierte Commit-Verifizierung](https://huggingface.co/docs/hub/security-gpg#signing-commits-with-gpg) mit GPG. + +TensorFlow- und Flax-Checkpoints sind nicht betroffen und können in PyTorch-Architekturen mit den Kwargs `from_tf` und `from_flax` für die Methode `from_pretrained` geladen werden, um dieses Problem zu umgehen. + + + +Im Allgemeinen empfehlen wir die Verwendung der Klasse "AutoTokenizer" und der Klasse "AutoModelFor", um trainierte Instanzen von Modellen zu laden. Dadurch wird sichergestellt, dass Sie jedes Mal die richtige Architektur laden. Im nächsten [Tutorial] (Vorverarbeitung) erfahren Sie, wie Sie Ihren neu geladenen Tokenizer, Feature Extractor und Prozessor verwenden, um einen Datensatz für die Feinabstimmung vorzuverarbeiten. + + +Mit den Klassen `TFAutoModelFor` schließlich können Sie ein vortrainiertes Modell für eine bestimmte Aufgabe laden (siehe [hier](model_doc/auto) für eine vollständige Liste der verfügbaren Aufgaben). Laden Sie zum Beispiel ein Modell für die Sequenzklassifikation mit [`TFAutoModelForSequenceClassification.from_pretrained`]: + +```py +>>> from transformers import TFAutoModelForSequenceClassification + +>>> model = TFAutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased") +``` + +Sie können denselben Prüfpunkt problemlos wiederverwenden, um eine Architektur für eine andere Aufgabe zu laden: + +```py +>>> from transformers import TFAutoModelForTokenClassification + +>>> model = TFAutoModelForTokenClassification.from_pretrained("distilbert-base-uncased") +``` + +Im Allgemeinen empfehlen wir, die Klasse "AutoTokenizer" und die Klasse "TFAutoModelFor" zu verwenden, um vortrainierte Instanzen von Modellen zu laden. Dadurch wird sichergestellt, dass Sie jedes Mal die richtige Architektur laden. Im nächsten [Tutorial] (Vorverarbeitung) erfahren Sie, wie Sie Ihren neu geladenen Tokenizer, Feature Extractor und Prozessor verwenden, um einen Datensatz für die Feinabstimmung vorzuverarbeiten. + + From ca485e562b675341409e3e27724072fb11e10af7 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Fri, 16 Sep 2022 17:20:02 -0400 Subject: [PATCH 328/539] Add tests for legacy load by url and fix bugs (#19078) --- src/transformers/modeling_flax_utils.py | 2 +- src/transformers/modeling_tf_utils.py | 2 +- src/transformers/modeling_utils.py | 2 +- src/transformers/tokenization_utils_base.py | 2 +- tests/test_configuration_common.py | 6 ++++++ tests/test_feature_extraction_common.py | 6 ++++++ tests/test_modeling_common.py | 21 +++++++++++++++++++++ tests/test_modeling_tf_common.py | 19 +++++++++++++++++++ tests/test_tokenization_common.py | 7 ++++++- 9 files changed, 62 insertions(+), 5 deletions(-) diff --git a/src/transformers/modeling_flax_utils.py b/src/transformers/modeling_flax_utils.py index 92d307e8cd7e2d..3299b543b7f218 100644 --- a/src/transformers/modeling_flax_utils.py +++ b/src/transformers/modeling_flax_utils.py @@ -680,7 +680,7 @@ def from_pretrained( archive_file = pretrained_model_name_or_path is_local = True elif is_remote_url(pretrained_model_name_or_path): - archive_file = pretrained_model_name_or_path + filename = pretrained_model_name_or_path resolved_archive_file = download_url(pretrained_model_name_or_path) else: filename = WEIGHTS_NAME if from_pt else FLAX_WEIGHTS_NAME diff --git a/src/transformers/modeling_tf_utils.py b/src/transformers/modeling_tf_utils.py index a90d1f0ebe49cf..af4eab59087e27 100644 --- a/src/transformers/modeling_tf_utils.py +++ b/src/transformers/modeling_tf_utils.py @@ -2418,7 +2418,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): archive_file = pretrained_model_name_or_path + ".index" is_local = True elif is_remote_url(pretrained_model_name_or_path): - archive_file = pretrained_model_name_or_path + filename = pretrained_model_name_or_path resolved_archive_file = download_url(pretrained_model_name_or_path) else: # set correct filename diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index af32c3f98f6fe3..79a8542d8b27a3 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -2005,7 +2005,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P archive_file = os.path.join(subfolder, pretrained_model_name_or_path + ".index") is_local = True elif is_remote_url(pretrained_model_name_or_path): - archive_file = pretrained_model_name_or_path + filename = pretrained_model_name_or_path resolved_archive_file = download_url(pretrained_model_name_or_path) else: # set correct filename diff --git a/src/transformers/tokenization_utils_base.py b/src/transformers/tokenization_utils_base.py index 2e7ac0be0fb29a..54d562136db4a1 100644 --- a/src/transformers/tokenization_utils_base.py +++ b/src/transformers/tokenization_utils_base.py @@ -1670,7 +1670,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], init_configuration = {} is_local = os.path.isdir(pretrained_model_name_or_path) - if os.path.isfile(pretrained_model_name_or_path): + if os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): if len(cls.vocab_files_names) > 1: raise ValueError( f"Calling {cls.__name__}.from_pretrained() with the path to a single file or url is not " diff --git a/tests/test_configuration_common.py b/tests/test_configuration_common.py index a7283b5f31232d..c2d48ef6625410 100644 --- a/tests/test_configuration_common.py +++ b/tests/test_configuration_common.py @@ -360,6 +360,12 @@ def test_cached_files_are_used_when_internet_is_down(self): # This check we did call the fake head request mock_head.assert_called() + def test_legacy_load_from_url(self): + # This test is for deprecated behavior and can be removed in v5 + _ = BertConfig.from_pretrained( + "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" + ) + class ConfigurationVersioningTest(unittest.TestCase): def test_local_versioning(self): diff --git a/tests/test_feature_extraction_common.py b/tests/test_feature_extraction_common.py index 61bd85e8922107..7b7c33a9642c02 100644 --- a/tests/test_feature_extraction_common.py +++ b/tests/test_feature_extraction_common.py @@ -182,6 +182,12 @@ def test_cached_files_are_used_when_internet_is_down(self): # This check we did call the fake head request mock_head.assert_called() + def test_legacy_load_from_url(self): + # This test is for deprecated behavior and can be removed in v5 + _ = Wav2Vec2FeatureExtractor.from_pretrained( + "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" + ) + @is_staging_test class FeatureExtractorPushToHubTester(unittest.TestCase): diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index 6c4814c1a87274..082f2a8a9057f9 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -33,6 +33,7 @@ import transformers from huggingface_hub import HfFolder, delete_repo, set_access_token +from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AutoConfig, @@ -2949,6 +2950,26 @@ def test_cached_files_are_used_when_internet_is_down(self): # This check we did call the fake head request mock_head.assert_called() + def test_load_from_one_file(self): + try: + tmp_file = tempfile.mktemp() + with open(tmp_file, "wb") as f: + http_get( + "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/pytorch_model.bin", f + ) + + config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") + _ = BertModel.from_pretrained(tmp_file, config=config) + finally: + os.remove(tmp_file) + + def test_legacy_load_from_url(self): + # This test is for deprecated behavior and can be removed in v5 + config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") + _ = BertModel.from_pretrained( + "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/pytorch_model.bin", config=config + ) + @require_torch @is_staging_test diff --git a/tests/test_modeling_tf_common.py b/tests/test_modeling_tf_common.py index 620d84083ea524..9977578b51b068 100644 --- a/tests/test_modeling_tf_common.py +++ b/tests/test_modeling_tf_common.py @@ -30,6 +30,7 @@ from datasets import Dataset from huggingface_hub import HfFolder, Repository, delete_repo, set_access_token +from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import is_tf_available, is_torch_available from transformers.configuration_utils import PretrainedConfig @@ -1927,6 +1928,24 @@ def test_cached_files_are_used_when_internet_is_down(self): # This check we did call the fake head request mock_head.assert_called() + def test_load_from_one_file(self): + try: + tmp_file = tempfile.mktemp() + with open(tmp_file, "wb") as f: + http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/tf_model.h5", f) + + config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") + _ = TFBertModel.from_pretrained(tmp_file, config=config) + finally: + os.remove(tmp_file) + + def test_legacy_load_from_url(self): + # This test is for deprecated behavior and can be removed in v5 + config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert") + _ = TFBertModel.from_pretrained( + "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/tf_model.h5", config=config + ) + # tests whether the unpack_inputs function behaves as expected def test_unpack_inputs(self): class DummyModel: diff --git a/tests/test_tokenization_common.py b/tests/test_tokenization_common.py index ef6eb421b44225..48add3f4f9ce7d 100644 --- a/tests/test_tokenization_common.py +++ b/tests/test_tokenization_common.py @@ -3891,15 +3891,20 @@ def test_cached_files_are_used_when_internet_is_down(self): mock_head.assert_called() def test_legacy_load_from_one_file(self): + # This test is for deprecated behavior and can be removed in v5 try: tmp_file = tempfile.mktemp() with open(tmp_file, "wb") as f: http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f) - AlbertTokenizer.from_pretrained(tmp_file) + _ = AlbertTokenizer.from_pretrained(tmp_file) finally: os.remove(tmp_file) + def test_legacy_load_from_url(self): + # This test is for deprecated behavior and can be removed in v5 + _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model") + @is_staging_test class TokenizerPushToHubTester(unittest.TestCase): From ba7f2173cc578fe6d9f1cdb900d5af609f195cf6 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 19 Sep 2022 12:27:06 +0200 Subject: [PATCH 329/539] Add runner availability check (#19054) Co-authored-by: ydshieh --- .github/workflows/check_runner_status.yml | 57 ++++++++++++++++ .github/workflows/self-nightly-scheduled.yml | 38 ++++++++--- .github/workflows/self-past.yml | 63 ++++++++++++------ .github/workflows/self-push.yml | 68 +++++++++++++------- .github/workflows/self-scheduled.yml | 38 +++++++---- utils/check_self_hosted_runner.py | 43 +++++++++++++ utils/notification_service.py | 21 +++--- 7 files changed, 257 insertions(+), 71 deletions(-) create mode 100644 .github/workflows/check_runner_status.yml create mode 100644 utils/check_self_hosted_runner.py diff --git a/.github/workflows/check_runner_status.yml b/.github/workflows/check_runner_status.yml new file mode 100644 index 00000000000000..fa40a08cc0db0b --- /dev/null +++ b/.github/workflows/check_runner_status.yml @@ -0,0 +1,57 @@ +name: Self-hosted runner (check runner status) + +# Note that each job's dependencies go into a corresponding docker file. +# +# For example for `run_all_tests_torch_cuda_extensions_gpu` the docker image is +# `huggingface/transformers-pytorch-deepspeed-latest-gpu`, which can be found at +# `docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile` + +on: + repository_dispatch: + schedule: + # run per hour + - cron: "* */1 * * *" + +env: + TRANSFORMERS_IS_CI: yes + +jobs: + check_runner_status: + name: Check Runner Status + runs-on: ubuntu-latest + steps: + - name: Checkout transformers + uses: actions/checkout@v2 + with: + fetch-depth: 2 + + - name: Check Runner Status + run: python utils/check_self_hosted_runner.py --target_runners single-gpu-ci-runner-docker,multi-gpu-ci-runner-docker,single-gpu-scheduled-ci-runner-docker,multi-scheduled-scheduled-ci-runner-docker --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }} + + send_results: + name: Send results to webhook + runs-on: ubuntu-latest + needs: check_runner_status + if: ${{ failure() }} + steps: + - name: Preliminary job status + shell: bash + run: | + echo "Runner availability: ${{ needs.check_runner_status.result }}" + + - uses: actions/checkout@v2 + - uses: actions/download-artifact@v2 + - name: Send message to Slack + env: + CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }} + CI_SLACK_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID }} + CI_SLACK_CHANNEL_ID_DAILY: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY }} + CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }} + CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY }} + CI_EVENT: runner status check + RUNNER_STATUS: ${{ needs.check_runner_status.result }} + # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change + # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`. + run: | + pip install slack_sdk + python utils/notification_service.py diff --git a/.github/workflows/self-nightly-scheduled.yml b/.github/workflows/self-nightly-scheduled.yml index 2b3283abf3b215..8865707dc0f631 100644 --- a/.github/workflows/self-nightly-scheduled.yml +++ b/.github/workflows/self-nightly-scheduled.yml @@ -23,8 +23,21 @@ env: RUN_PT_TF_CROSS_TESTS: 1 jobs: - run_check_runners: + check_runner_status: + name: Check Runner Status + runs-on: ubuntu-latest + steps: + - name: Checkout transformers + uses: actions/checkout@v2 + with: + fetch-depth: 2 + + - name: Check Runner Status + run: python utils/check_self_hosted_runner.py --target_runners single-gpu-scheduled-ci-runner-docker,multi-gpu-scheduled-ci-runner-docker --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }} + + check_runners: name: Check Runners + needs: check_runner_status strategy: matrix: machine_type: [single-gpu, multi-gpu] @@ -39,7 +52,7 @@ jobs: setup: name: Setup - needs: run_check_runners + needs: check_runners strategy: matrix: machine_type: [single-gpu, multi-gpu] @@ -83,7 +96,7 @@ jobs: container: image: huggingface/transformers-all-latest-torch-nightly-gpu options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - needs: [run_check_runners, setup] + needs: setup steps: - name: Echo folder ${{ matrix.folders }} shell: bash @@ -136,7 +149,7 @@ jobs: container: image: huggingface/transformers-all-latest-torch-nightly-gpu options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - needs: [run_check_runners, setup] + needs: setup steps: - name: Echo folder ${{ matrix.folders }} shell: bash @@ -185,7 +198,7 @@ jobs: matrix: machine_type: [single-gpu, multi-gpu] runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker') }} - needs: [run_check_runners, setup] + needs: setup container: image: huggingface/transformers-pytorch-deepspeed-nightly-gpu options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ @@ -236,13 +249,21 @@ jobs: name: Send results to webhook runs-on: ubuntu-latest if: always() - needs: [run_check_runners, setup, run_tests_single_gpu, run_tests_multi_gpu, run_all_tests_torch_cuda_extensions_gpu] + needs: [ + check_runner_status, + check_runners, + setup, + run_tests_single_gpu, + run_tests_multi_gpu, + run_all_tests_torch_cuda_extensions_gpu + ] steps: - name: Preliminary job status shell: bash # For the meaning of these environment variables, see the job `Setup` run: | - echo "Runner status: ${{ needs.run_check_runners.result }}" + echo "Runner availability: ${{ needs.check_runner_status.result }}" + echo "Runner status: ${{ needs.check_runners.result }}" echo "Setup status: ${{ needs.setup.result }}" - uses: actions/checkout@v2 @@ -255,8 +276,9 @@ jobs: CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }} CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_PAST_FUTURE }} CI_EVENT: nightly-build + RUNNER_STATUS: ${{ needs.check_runner_status.result }} + RUNNER_ENV_STATUS: ${{ needs.check_runners.result }} SETUP_STATUS: ${{ needs.setup.result }} - RUNNER_STATUS: ${{ needs.run_check_runners.result }} # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`. run: | diff --git a/.github/workflows/self-past.yml b/.github/workflows/self-past.yml index 8e9130023bb36c..71814cf3f32ba3 100644 --- a/.github/workflows/self-past.yml +++ b/.github/workflows/self-past.yml @@ -27,9 +27,43 @@ env: RUN_PT_TF_CROSS_TESTS: 1 jobs: + check_runner_status: + name: Check Runner Status + runs-on: ubuntu-latest + steps: + - name: Checkout transformers + uses: actions/checkout@v2 + with: + fetch-depth: 2 + + - name: Check Runner Status + run: python utils/check_self_hosted_runner.py --target_runners single-gpu-past-ci-runner-docker,multi-gpu-past-ci-runner-docker --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }} + + check_runners: + name: Check Runners + needs: check_runner_status + strategy: + matrix: + machine_type: [single-gpu, multi-gpu] + runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker-past-ci') }} + container: + image: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu + options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ + steps: + - name: NVIDIA-SMI + run: | + nvidia-smi + setup: name: Setup - runs-on: ubuntu-latest + needs: check_runners + strategy: + matrix: + machine_type: [single-gpu, multi-gpu] + runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker-past-ci') }} + container: + image: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu + options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ outputs: matrix: ${{ steps.set-matrix.outputs.matrix }} steps: @@ -50,21 +84,6 @@ jobs: cd tests echo "::set-output name=matrix::$(python3 -c 'import os; tests = os.getcwd(); model_tests = os.listdir(os.path.join(tests, "models")); d1 = sorted(list(filter(os.path.isdir, os.listdir(tests)))); d2 = sorted(list(filter(os.path.isdir, [f"models/{x}" for x in model_tests]))); d1.remove("models"); d = d2 + d1; print(d)')" - run_check_runners: - name: Check Runners - needs: setup - strategy: - matrix: - machine_type: [single-gpu, multi-gpu] - runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker-past-ci') }} - container: - image: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu - options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - steps: - - name: NVIDIA-SMI - run: | - nvidia-smi - run_tests_single_gpu: name: Model tests strategy: @@ -76,7 +95,7 @@ jobs: container: image: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - needs: [setup, run_check_runners] + needs: setup steps: - name: Update clone working-directory: /transformers @@ -129,7 +148,7 @@ jobs: container: image: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - needs: [setup, run_check_runners] + needs: setup steps: - name: Update clone working-directory: /transformers @@ -175,13 +194,14 @@ jobs: name: Send results to webhook runs-on: ubuntu-latest if: always() - needs: [setup, run_check_runners, run_tests_single_gpu, run_tests_multi_gpu] + needs: [check_runner_status, check_runners, setup, run_tests_single_gpu, run_tests_multi_gpu] steps: - name: Preliminary job status shell: bash # For the meaning of these environment variables, see the job `Setup` run: | - echo "Runner status: ${{ needs.run_check_runners.result }}" + echo "Runner availability: ${{ needs.check_runner_status.result }}" + echo "Runner status: ${{ needs.check_runners.result }}" echo "Setup status: ${{ needs.setup.result }}" - uses: actions/checkout@v2 @@ -199,8 +219,9 @@ jobs: CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }} CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_PAST_FUTURE }} CI_EVENT: Past CI - ${{ inputs.framework }}-${{ inputs.version }} + RUNNER_STATUS: ${{ needs.check_runner_status.result }} + RUNNER_ENV_STATUS: ${{ needs.check_runners.result }} SETUP_STATUS: ${{ needs.setup.result }} - RUNNER_STATUS: ${{ needs.run_check_runners.result }} # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`. run: | diff --git a/.github/workflows/self-push.yml b/.github/workflows/self-push.yml index 56e425570c6b29..25da61ac66cf9b 100644 --- a/.github/workflows/self-push.yml +++ b/.github/workflows/self-push.yml @@ -27,9 +27,43 @@ env: RUN_PT_TF_CROSS_TESTS: 1 jobs: + check_runner_status: + name: Check Runner Status + runs-on: ubuntu-latest + steps: + - name: Checkout transformers + uses: actions/checkout@v2 + with: + fetch-depth: 2 + + - name: Check Runner Status + run: python utils/check_self_hosted_runner.py --target_runners single-gpu-ci-runner-docker,multi-gpu-ci-runner-docker --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }} + + check_runners: + name: Check Runners + needs: check_runner_status + strategy: + matrix: + machine_type: [single-gpu, multi-gpu] + runs-on: [self-hosted, docker-gpu, '${{ matrix.machine_type }}'] + container: + image: huggingface/transformers-all-latest-gpu + options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ + steps: + - name: NVIDIA-SMI + run: | + nvidia-smi + setup: name: Setup - runs-on: ubuntu-latest + needs: check_runners + strategy: + matrix: + machine_type: [single-gpu, multi-gpu] + runs-on: [self-hosted, docker-gpu, '${{ matrix.machine_type }}'] + container: + image: huggingface/transformers-all-latest-gpu + options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ outputs: matrix: ${{ steps.set-matrix.outputs.matrix }} test_map: ${{ steps.set-matrix.outputs.test_map }} @@ -111,24 +145,9 @@ jobs: echo "::set-output name=matrix::$keys" echo "::set-output name=test_map::$test_map" - run_check_runners: - name: Check Runners - needs: setup - strategy: - matrix: - machine_type: [single-gpu, multi-gpu] - runs-on: [self-hosted, docker-gpu, '${{ matrix.machine_type }}'] - container: - image: huggingface/transformers-all-latest-gpu - options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - steps: - - name: NVIDIA-SMI - run: | - nvidia-smi - run_tests_single_gpu: name: Model tests - needs: [setup, run_check_runners] + needs: setup # `dummy` means there is no test to run if: contains(fromJson(needs.setup.outputs.matrix), 'dummy') != true strategy: @@ -213,7 +232,7 @@ jobs: run_tests_multi_gpu: name: Model tests - needs: [setup, run_check_runners] + needs: setup # `dummy` means there is no test to run if: contains(fromJson(needs.setup.outputs.matrix), 'dummy') != true strategy: @@ -300,7 +319,7 @@ jobs: run_tests_torch_cuda_extensions_single_gpu: name: Torch CUDA extension tests - needs: [setup, run_check_runners] + needs: setup if: contains(fromJson(needs.setup.outputs.matrix), 'deepspeed') || contains(fromJson(needs.setup.outputs.matrix), 'extended') strategy: fail-fast: false @@ -382,7 +401,7 @@ jobs: run_tests_torch_cuda_extensions_multi_gpu: name: Torch CUDA extension tests - needs: [setup, run_check_runners] + needs: setup if: contains(fromJson(needs.setup.outputs.matrix), 'deepspeed') || contains(fromJson(needs.setup.outputs.matrix), 'extended') strategy: fail-fast: false @@ -467,8 +486,9 @@ jobs: runs-on: ubuntu-latest if: always() needs: [ + check_runner_status, + check_runners, setup, - run_check_runners, run_tests_single_gpu, run_tests_multi_gpu, run_tests_torch_cuda_extensions_single_gpu, @@ -479,8 +499,9 @@ jobs: shell: bash # For the meaning of these environment variables, see the job `Setup` run: | + echo "Runner availability: ${{ needs.check_runner_status.result }}" echo "Setup status: ${{ needs.setup.result }}" - echo "Runner status: ${{ needs.run_check_runners.result }}" + echo "Runner status: ${{ needs.check_runners.result }}" # Necessary to get the correct branch name and commit SHA for `workflow_run` event # We also take into account the `push` event (we might want to test some changes in a branch) @@ -527,8 +548,9 @@ jobs: CI_TITLE_PUSH: ${{ github.event.head_commit.message }} CI_TITLE_WORKFLOW_RUN: ${{ github.event.workflow_run.head_commit.message }} CI_SHA: ${{ env.CI_SHA }} + RUNNER_STATUS: ${{ needs.check_runner_status.result }} + RUNNER_ENV_STATUS: ${{ needs.check_runners.result }} SETUP_STATUS: ${{ needs.setup.result }} - RUNNER_STATUS: ${{ needs.run_check_runners.result }} # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`. diff --git a/.github/workflows/self-scheduled.yml b/.github/workflows/self-scheduled.yml index a0a10921ae825b..7de69a573e3852 100644 --- a/.github/workflows/self-scheduled.yml +++ b/.github/workflows/self-scheduled.yml @@ -22,8 +22,21 @@ env: RUN_PT_TF_CROSS_TESTS: 1 jobs: - run_check_runners: + check_runner_status: + name: Check Runner Status + runs-on: ubuntu-latest + steps: + - name: Checkout transformers + uses: actions/checkout@v2 + with: + fetch-depth: 2 + + - name: Check Runner Status + run: python utils/check_self_hosted_runner.py --target_runners single-gpu-scheduled-ci-runner-docker,multi-gpu-scheduled-ci-runner-docker --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }} + + check_runners: name: Check Runners + needs: check_runner_status strategy: matrix: machine_type: [single-gpu, multi-gpu] @@ -38,7 +51,7 @@ jobs: setup: name: Setup - needs: run_check_runners + needs: check_runners strategy: matrix: machine_type: [single-gpu, multi-gpu] @@ -82,7 +95,7 @@ jobs: container: image: huggingface/transformers-all-latest-gpu options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - needs: [run_check_runners, setup] + needs: setup steps: - name: Echo folder ${{ matrix.folders }} shell: bash @@ -135,7 +148,7 @@ jobs: container: image: huggingface/transformers-all-latest-gpu options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - needs: [run_check_runners, setup] + needs: setup steps: - name: Echo folder ${{ matrix.folders }} shell: bash @@ -183,7 +196,7 @@ jobs: container: image: huggingface/transformers-all-latest-gpu options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - needs: [run_check_runners, setup] + needs: setup steps: - name: Update clone working-directory: /transformers @@ -226,7 +239,7 @@ jobs: container: image: huggingface/transformers-pytorch-gpu options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - needs: [run_check_runners, setup] + needs: setup steps: - name: Update clone working-directory: /transformers @@ -270,7 +283,7 @@ jobs: container: image: huggingface/transformers-tensorflow-gpu options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ - needs: [run_check_runners, setup] + needs: setup steps: - name: Update clone working-directory: /transformers @@ -312,7 +325,7 @@ jobs: matrix: machine_type: [single-gpu, multi-gpu] runs-on: ${{ format('{0}-{1}', matrix.machine_type, 'docker') }} - needs: [run_check_runners, setup] + needs: setup container: image: huggingface/transformers-pytorch-deepspeed-latest-gpu options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ @@ -362,7 +375,8 @@ jobs: runs-on: ubuntu-latest if: always() needs: [ - run_check_runners, + check_runner_status, + check_runners, setup, run_tests_single_gpu, run_tests_multi_gpu, @@ -376,7 +390,8 @@ jobs: shell: bash # For the meaning of these environment variables, see the job `Setup` run: | - echo "Runner status: ${{ needs.run_check_runners.result }}" + echo "Runner availability: ${{ needs.check_runner_status.result }}" + echo "Runner status: ${{ needs.check_runners.result }}" echo "Setup status: ${{ needs.setup.result }}" - uses: actions/checkout@v2 @@ -389,8 +404,9 @@ jobs: CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }} CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY }} CI_EVENT: scheduled + RUNNER_STATUS: ${{ needs.check_runner_status.result }} + RUNNER_ENV_STATUS: ${{ needs.check_runners.result }} SETUP_STATUS: ${{ needs.setup.result }} - RUNNER_STATUS: ${{ needs.run_check_runners.result }} # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`. run: | diff --git a/utils/check_self_hosted_runner.py b/utils/check_self_hosted_runner.py new file mode 100644 index 00000000000000..47049d9925ee31 --- /dev/null +++ b/utils/check_self_hosted_runner.py @@ -0,0 +1,43 @@ +import argparse +import json +import subprocess + + +def get_runner_status(target_runners, token): + + cmd = ( + f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"' + " https://api.github.com/repos/huggingface/transformers/actions/runners" + ) + output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE) + o = output.stdout.decode("utf-8") + status = json.loads(o) + + runners = status["runners"] + for runner in runners: + if runner["name"] in target_runners: + if runner["status"] == "offline": + raise ValueError(f"{runner['name']} is offline!") + + +if __name__ == "__main__": + + def list_str(values): + return values.split(",") + + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--target_runners", + default=None, + type=list_str, + required=True, + help="Comma-separated list of runners to check status.", + ) + + parser.add_argument( + "--token", default=None, type=str, required=True, help="A token that has actions:read permission." + ) + args = parser.parse_args() + + get_runner_status(args.target_runners, args.token) diff --git a/utils/notification_service.py b/utils/notification_service.py index 9ed97236d46270..5b5fdd46f13f9d 100644 --- a/utils/notification_service.py +++ b/utils/notification_service.py @@ -387,7 +387,7 @@ def payload(self) -> str: return json.dumps(blocks) @staticmethod - def error_out(title, ci_title="", setup_failed=False, runner_failed=False): + def error_out(title, ci_title="", runner_not_available=False, runner_failed=False, setup_failed=False): blocks = [] title_block = {"type": "header", "text": {"type": "plain_text", "text": title}} @@ -397,10 +397,12 @@ def error_out(title, ci_title="", setup_failed=False, runner_failed=False): ci_title_block = {"type": "section", "text": {"type": "mrkdwn", "text": ci_title}} blocks.append(ci_title_block) - if setup_failed: - text = "💔 Setup job failed. Tests are not run. 😭" + if runner_not_available: + text = "💔 CI runners are not available! Tests are not run. 😭" elif runner_failed: text = "💔 CI runners have problems! Tests are not run. 😭" + elif setup_failed: + text = "💔 Setup job failed. Tests are not run. 😭" else: text = "💔 There was an issue running the tests. 😭" @@ -654,10 +656,13 @@ def prepare_reports(title, header, reports, to_truncate=True): if __name__ == "__main__": - setup_status = os.environ.get("SETUP_STATUS") runner_status = os.environ.get("RUNNER_STATUS") + runner_env_status = os.environ.get("RUNNER_ENV_STATUS") + setup_status = os.environ.get("SETUP_STATUS") + + runner_not_available = True if runner_status is not None and runner_status != "success" else False + runner_failed = True if runner_env_status is not None and runner_env_status != "success" else False setup_failed = True if setup_status is not None and setup_status != "success" else False - runner_failed = True if runner_status is not None and runner_status != "success" else False org = "huggingface" repo = "transformers" @@ -718,8 +723,8 @@ def prepare_reports(title, header, reports, to_truncate=True): else: ci_title = "" - if setup_failed or runner_failed: - Message.error_out(title, ci_title, setup_failed, runner_failed) + if runner_not_available or runner_failed or setup_failed: + Message.error_out(title, ci_title, runner_not_available, runner_failed, setup_failed) exit(0) arguments = sys.argv[1:][0] @@ -728,7 +733,7 @@ def prepare_reports(title, header, reports, to_truncate=True): # Need to change from elements like `models/bert` to `models_bert` (the ones used as artifact names). models = [x.replace("models/", "models_") for x in models] except SyntaxError: - Message.error_out() + Message.error_out(title, ci_title) raise ValueError("Errored out.") github_actions_job_links = get_job_links() From 22264f933d857cfe6335dfd59380dc1a9a113593 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 19 Sep 2022 13:09:24 +0200 Subject: [PATCH 330/539] fix working dir (#19101) Co-authored-by: ydshieh --- .github/workflows/self-push.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/self-push.yml b/.github/workflows/self-push.yml index 25da61ac66cf9b..97a76554df3ef0 100644 --- a/.github/workflows/self-push.yml +++ b/.github/workflows/self-push.yml @@ -102,6 +102,7 @@ jobs: fetch-depth: 2 - name: Update clone using environment variables + working-directory: /transformers run: | echo "original branch = $(git branch --show-current)" git fetch && git checkout ${{ env.CI_BRANCH }} From fbe8464b5babd5fa417b373e9428150ea77b3a8c Mon Sep 17 00:00:00 2001 From: "S.Kishore" Date: Mon, 19 Sep 2022 17:58:13 +0530 Subject: [PATCH 331/539] Added type hints for TFConvBertModel (#19088) --- .../models/convbert/modeling_tf_convbert.py | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/transformers/models/convbert/modeling_tf_convbert.py b/src/transformers/models/convbert/modeling_tf_convbert.py index f62718af5fe590..f69e54282d02f1 100644 --- a/src/transformers/models/convbert/modeling_tf_convbert.py +++ b/src/transformers/models/convbert/modeling_tf_convbert.py @@ -744,17 +744,17 @@ def __init__(self, config, *inputs, **kwargs): ) def call( self, - input_ids=None, - attention_mask=None, - token_type_ids=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - training=False, - ): + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.array, tf.Tensor]] = None, + token_type_ids: Optional[Union[np.array, tf.Tensor]] = None, + position_ids: Optional[Union[np.array, tf.Tensor]] = None, + head_mask: Optional[Union[np.array, tf.Tensor]] = None, + inputs_embeds: Optional[tf.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: outputs = self.convbert( input_ids=input_ids, attention_mask=attention_mask, From 1bbad7a2daa57400bffd44ce88742f32b7b59f75 Mon Sep 17 00:00:00 2001 From: "S.Kishore" Date: Mon, 19 Sep 2022 18:07:18 +0530 Subject: [PATCH 332/539] Added Type hints for VIT MAE (#19085) * Added Type hints for VIT MAE * Ran make fixup --- .../models/vit_mae/modeling_vit_mae.py | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/src/transformers/models/vit_mae/modeling_vit_mae.py b/src/transformers/models/vit_mae/modeling_vit_mae.py index 0667bdd73c5545..20ed4452712ec2 100755 --- a/src/transformers/models/vit_mae/modeling_vit_mae.py +++ b/src/transformers/models/vit_mae/modeling_vit_mae.py @@ -665,13 +665,13 @@ class PreTrainedModel @replace_return_docstrings(output_type=ViTMAEModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, - pixel_values=None, - noise=None, - head_mask=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - ): + pixel_values: Optional[torch.FloatTensor] = None, + noise: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, ViTMAEModelOutput]: r""" Returns: @@ -957,13 +957,13 @@ def forward_loss(self, pixel_values, pred, mask): @replace_return_docstrings(output_type=ViTMAEForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward( self, - pixel_values=None, - noise=None, - head_mask=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - ): + pixel_values: Optional[torch.FloatTensor] = None, + noise: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, ViTMAEForPreTrainingOutput]: r""" Returns: From fe5e7cea4ac9c7b0cca8c33b86a24827e8331311 Mon Sep 17 00:00:00 2001 From: "S.Kishore" Date: Mon, 19 Sep 2022 18:07:32 +0530 Subject: [PATCH 333/539] Add type hints for TF MPNet models (#19089) * Added type hints for TFMPNetModel * Added type hints for TFMPNetForMaskedLM * Added type hints for TFMPNetForSequenceClassification * Added type hints for TFMPNetForMultipleChoice * Added type hints for TFMPNetForTokenClassification * Added Type hints for TFMPNetForQuestionAnswering --- .../models/mpnet/modeling_tf_mpnet.py | 135 +++++++++--------- 1 file changed, 69 insertions(+), 66 deletions(-) diff --git a/src/transformers/models/mpnet/modeling_tf_mpnet.py b/src/transformers/models/mpnet/modeling_tf_mpnet.py index 3fc43184617063..3ceb1489a9bcdb 100644 --- a/src/transformers/models/mpnet/modeling_tf_mpnet.py +++ b/src/transformers/models/mpnet/modeling_tf_mpnet.py @@ -18,7 +18,9 @@ import math import warnings +from typing import Optional, Tuple, Union +import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation @@ -33,6 +35,7 @@ ) from ...modeling_tf_utils import ( TFMaskedLanguageModelingLoss, + TFModelInputType, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, @@ -681,16 +684,16 @@ def __init__(self, config, *inputs, **kwargs): ) def call( self, - input_ids=None, - attention_mask=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - training=False, - ): + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.array, tf.Tensor]] = None, + position_ids: Optional[Union[np.array, tf.Tensor]] = None, + head_mask: Optional[Union[np.array, tf.Tensor]] = None, + inputs_embeds: Optional[tf.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: outputs = self.mpnet( input_ids=input_ids, attention_mask=attention_mask, @@ -796,17 +799,17 @@ def get_prefix_bias_name(self): ) def call( self, - input_ids=None, - attention_mask=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - labels=None, - training=False, - ): + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + inputs_embeds: Optional[tf.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: Optional[tf.Tensor] = None, + training: bool = False, + ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., @@ -901,17 +904,17 @@ def __init__(self, config, *inputs, **kwargs): ) def call( self, - input_ids=None, - attention_mask=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - labels=None, - training=False, - ): + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.array, tf.Tensor]] = None, + position_ids: Optional[Union[np.array, tf.Tensor]] = None, + head_mask: Optional[Union[np.array, tf.Tensor]] = None, + inputs_embeds: Optional[tf.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: Optional[tf.Tensor] = None, + training: bool = False, + ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., @@ -991,17 +994,17 @@ def dummy_inputs(self): ) def call( self, - input_ids=None, - attention_mask=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - labels=None, - training=False, - ): + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + inputs_embeds: Optional[tf.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: Optional[tf.Tensor] = None, + training: bool = False, + ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]` @@ -1102,17 +1105,17 @@ def __init__(self, config, *inputs, **kwargs): ) def call( self, - input_ids=None, - attention_mask=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - labels=None, - training=False, - ): + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + inputs_embeds: Optional[tf.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: Optional[tf.Tensor] = None, + training: bool = False, + ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. @@ -1184,19 +1187,19 @@ def __init__(self, config, *inputs, **kwargs): ) def call( self, - input_ids=None, - attention_mask=None, - position_ids=None, - head_mask=None, - inputs_embeds=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - start_positions=None, - end_positions=None, - training=False, + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.array, tf.Tensor]] = None, + position_ids: Optional[Union[np.array, tf.Tensor]] = None, + head_mask: Optional[Union[np.array, tf.Tensor]] = None, + inputs_embeds: Optional[tf.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + start_positions: Optional[tf.Tensor] = None, + end_positions: Optional[tf.Tensor] = None, + training: bool = False, **kwargs, - ): + ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]: r""" start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. From 6f25d107fdd1caeac9229d9807df7695080d0115 Mon Sep 17 00:00:00 2001 From: "S.Kishore" Date: Mon, 19 Sep 2022 18:12:13 +0530 Subject: [PATCH 334/539] Added type hints to ResNetForImageClassification (#19084) * Added type hints to ResNetForImageClassification * Resolved check_repository_consistency failure issue Running fix-copies changed the type hints for RegNetForImageClassification in modeling_regnet.py file --- src/transformers/models/regnet/modeling_regnet.py | 8 ++++---- src/transformers/models/resnet/modeling_resnet.py | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/transformers/models/regnet/modeling_regnet.py b/src/transformers/models/regnet/modeling_regnet.py index 64b14dc54de813..f317bf47d75a41 100644 --- a/src/transformers/models/regnet/modeling_regnet.py +++ b/src/transformers/models/regnet/modeling_regnet.py @@ -407,10 +407,10 @@ def __init__(self, config): ) def forward( self, - pixel_values: Tensor = None, - labels: Tensor = None, - output_hidden_states: bool = None, - return_dict: bool = None, + pixel_values: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, ) -> ImageClassifierOutputWithNoAttention: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): diff --git a/src/transformers/models/resnet/modeling_resnet.py b/src/transformers/models/resnet/modeling_resnet.py index d8804d960443df..b771aa3e312507 100644 --- a/src/transformers/models/resnet/modeling_resnet.py +++ b/src/transformers/models/resnet/modeling_resnet.py @@ -370,10 +370,10 @@ def __init__(self, config): ) def forward( self, - pixel_values: Tensor = None, - labels: Tensor = None, - output_hidden_states: bool = None, - return_dict: bool = None, + pixel_values: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, ) -> ImageClassifierOutputWithNoAttention: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): From 0d1ba2dd0ba1b584c0380ee9f69c2b222b462d76 Mon Sep 17 00:00:00 2001 From: Partho Date: Mon, 19 Sep 2022 18:40:21 +0530 Subject: [PATCH 335/539] added type hints (#19076) --- src/transformers/models/sew_d/modeling_sew_d.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/transformers/models/sew_d/modeling_sew_d.py b/src/transformers/models/sew_d/modeling_sew_d.py index bcd95139c55898..b5dda8762f9f06 100644 --- a/src/transformers/models/sew_d/modeling_sew_d.py +++ b/src/transformers/models/sew_d/modeling_sew_d.py @@ -1194,11 +1194,11 @@ def __init__(self, config): def forward( self, - hidden_states, - attention_mask=None, - output_attentions=False, - output_hidden_states=False, - return_dict=True, + hidden_states: torch.tensor, + attention_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, ): max_encoder_length = hidden_states.shape[1] // self.config.squeeze_factor if attention_mask is None: From e7206ceab9db2dec4742cec6ac57b9a630bd6ea6 Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Mon, 19 Sep 2022 19:22:34 +0200 Subject: [PATCH 336/539] Improve vision models docs (#19103) * Add tips * Add BEiT figure * Fix URL * Move tip to start * Add tip to TF model as well Co-authored-by: Niels Rogge --- docs/source/en/model_doc/beit.mdx | 5 +++++ docs/source/en/model_doc/vit.mdx | 12 +++++------- docs/source/en/model_doc/xclip.mdx | 3 ++- src/transformers/models/deit/modeling_deit.py | 11 +++++++++-- src/transformers/models/swin/modeling_swin.py | 11 +++++++++-- .../models/swinv2/modeling_swinv2.py | 12 ++++++++++-- .../models/vit/modeling_tf_vit.py | 8 ++++++++ src/transformers/models/vit/modeling_vit.py | 19 +++++++++++++++++-- .../models/vit_mae/modeling_vit_mae.py | 11 ++++++++++- 9 files changed, 75 insertions(+), 17 deletions(-) diff --git a/docs/source/en/model_doc/beit.mdx b/docs/source/en/model_doc/beit.mdx index 625357810ded9f..f8177443d13a23 100644 --- a/docs/source/en/model_doc/beit.mdx +++ b/docs/source/en/model_doc/beit.mdx @@ -59,6 +59,11 @@ Tips: `use_relative_position_bias` attribute of [`BeitConfig`] to `True` in order to add position embeddings. + + + BEiT pre-training. Taken from the original paper. + This model was contributed by [nielsr](https://huggingface.co/nielsr). The JAX/FLAX version of this model was contributed by [kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/microsoft/unilm/tree/master/beit). diff --git a/docs/source/en/model_doc/vit.mdx b/docs/source/en/model_doc/vit.mdx index 37c469f6aaaefc..5978d4518e3af1 100644 --- a/docs/source/en/model_doc/vit.mdx +++ b/docs/source/en/model_doc/vit.mdx @@ -12,13 +12,6 @@ specific language governing permissions and limitations under the License. # Vision Transformer (ViT) - - -This is a recently introduced model so the API hasn't been tested extensively. There may be some bugs or slight -breaking changes to fix it in the future. If you see something strange, file a [Github Issue](https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title). - - - ## Overview The Vision Transformer (ViT) model was proposed in [An Image is Worth 16x16 Words: Transformers for Image Recognition @@ -63,6 +56,11 @@ Tips: language modeling). With this approach, the smaller ViT-B/16 model achieves 79.9% accuracy on ImageNet, a significant improvement of 2% to training from scratch, but still 4% behind supervised pre-training. + + + ViT architecture. Taken from the original paper. + Following the original Vision Transformer, some follow-up works have been made: - [DeiT](deit) (Data-efficient Image Transformers) by Facebook AI. DeiT models are distilled vision transformers. diff --git a/docs/source/en/model_doc/xclip.mdx b/docs/source/en/model_doc/xclip.mdx index 4d572b6760071d..96832f46e5b88a 100644 --- a/docs/source/en/model_doc/xclip.mdx +++ b/docs/source/en/model_doc/xclip.mdx @@ -23,7 +23,8 @@ The abstract from the paper is the following: Tips: -- Usage of X-CLIP is identical to CLIP. +- Usage of X-CLIP is identical to [CLIP](clip). +- Demo notebooks for X-CLIP can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/X-CLIP). drawing diff --git a/src/transformers/models/deit/modeling_deit.py b/src/transformers/models/deit/modeling_deit.py index 8f8307499fa479..44110f5e443557 100644 --- a/src/transformers/models/deit/modeling_deit.py +++ b/src/transformers/models/deit/modeling_deit.py @@ -555,8 +555,15 @@ def forward(self, hidden_states): @add_start_docstrings( - "DeiT Model with a decoder on top for masked image modeling, as proposed in" - " [SimMIM](https://arxiv.org/abs/2111.09886).", + """DeiT Model with a decoder on top for masked image modeling, as proposed in [SimMIM](https://arxiv.org/abs/2111.09886). + + + + Note that we provide a script to pre-train this model on custom data in our [examples + directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining). + + + """, DEIT_START_DOCSTRING, ) class DeiTForMaskedImageModeling(DeiTPreTrainedModel): diff --git a/src/transformers/models/swin/modeling_swin.py b/src/transformers/models/swin/modeling_swin.py index 58d01d1cdfd45d..588a4200fb1ef8 100644 --- a/src/transformers/models/swin/modeling_swin.py +++ b/src/transformers/models/swin/modeling_swin.py @@ -1007,8 +1007,15 @@ def forward( @add_start_docstrings( - "Swin Model with a decoder on top for masked image modeling, as proposed in" - " [SimMIM](https://arxiv.org/abs/2111.09886).", + """Swin Model with a decoder on top for masked image modeling, as proposed in [SimMIM](https://arxiv.org/abs/2111.09886). + + + + Note that we provide a script to pre-train this model on custom data in our [examples + directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining). + + + """, SWIN_START_DOCSTRING, ) class SwinForMaskedImageModeling(SwinPreTrainedModel): diff --git a/src/transformers/models/swinv2/modeling_swinv2.py b/src/transformers/models/swinv2/modeling_swinv2.py index 890530691dd3a6..926a7dd2767943 100644 --- a/src/transformers/models/swinv2/modeling_swinv2.py +++ b/src/transformers/models/swinv2/modeling_swinv2.py @@ -1087,8 +1087,16 @@ def forward( @add_start_docstrings( - "Swinv2 Model with a decoder on top for masked image modeling, as proposed in" - " [SimMIM](https://arxiv.org/abs/2111.09886).", + """Swinv2 Model with a decoder on top for masked image modeling, as proposed in +[SimMIM](https://arxiv.org/abs/2111.09886). + + + + Note that we provide a script to pre-train this model on custom data in our [examples + directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining). + + + """, SWINV2_START_DOCSTRING, ) # Copied from transformers.models.swin.modeling_swin.SwinForMaskedImageModeling with SWIN->SWINV2,Swin->Swinv2,swin->swinv2,224->256,window7->window8 diff --git a/src/transformers/models/vit/modeling_tf_vit.py b/src/transformers/models/vit/modeling_tf_vit.py index 754a86ce2814ee..727cbb75170b1e 100644 --- a/src/transformers/models/vit/modeling_tf_vit.py +++ b/src/transformers/models/vit/modeling_tf_vit.py @@ -733,6 +733,14 @@ def call(self, hidden_states: tf.Tensor) -> tf.Tensor: """ ViT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet. + + + + Note that it's possible to fine-tune ViT on higher resolution images than the ones it has been trained on, by + setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained + position embeddings to the higher resolution. + + """, VIT_START_DOCSTRING, ) diff --git a/src/transformers/models/vit/modeling_vit.py b/src/transformers/models/vit/modeling_vit.py index 7017f232f0e9c7..e6df4baa701950 100644 --- a/src/transformers/models/vit/modeling_vit.py +++ b/src/transformers/models/vit/modeling_vit.py @@ -597,8 +597,15 @@ def forward(self, hidden_states): @add_start_docstrings( - "ViT Model with a decoder on top for masked image modeling, as proposed in" - " [SimMIM](https://arxiv.org/abs/2111.09886).", + """ViT Model with a decoder on top for masked image modeling, as proposed in [SimMIM](https://arxiv.org/abs/2111.09886). + + + + Note that we provide a script to pre-train this model on custom data in our [examples + directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining). + + + """, VIT_START_DOCSTRING, ) class ViTForMaskedImageModeling(ViTPreTrainedModel): @@ -712,6 +719,14 @@ def forward( """ ViT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of the [CLS] token) e.g. for ImageNet. + + + + Note that it's possible to fine-tune ViT on higher resolution images than the ones it has been trained on, by + setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained + position embeddings to the higher resolution. + + """, VIT_START_DOCSTRING, ) diff --git a/src/transformers/models/vit_mae/modeling_vit_mae.py b/src/transformers/models/vit_mae/modeling_vit_mae.py index 20ed4452712ec2..b612d2f67b091d 100755 --- a/src/transformers/models/vit_mae/modeling_vit_mae.py +++ b/src/transformers/models/vit_mae/modeling_vit_mae.py @@ -837,7 +837,16 @@ def custom_forward(*inputs): @add_start_docstrings( - "The ViTMAE Model transformer with the decoder on top for self-supervised pre-training.", + """The ViTMAE Model transformer with the decoder on top for self-supervised pre-training. + + + + Note that we provide a script to pre-train this model on custom data in our [examples + directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining). + + + + """, VIT_MAE_START_DOCSTRING, ) class ViTMAEForPreTraining(ViTMAEPreTrainedModel): From 6be338f1b97bc3de4341e9ba5fa7b03025381a49 Mon Sep 17 00:00:00 2001 From: flozi00 Date: Mon, 19 Sep 2022 19:51:43 +0200 Subject: [PATCH 337/539] correct spelling in README (#19092) --- docs/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/README.md b/docs/README.md index 964a8b8b27a3a4..9edce16782f04d 100644 --- a/docs/README.md +++ b/docs/README.md @@ -312,13 +312,13 @@ easily. # Testing documentation examples -Good documentation oftens comes with an example of how a specific function or class should be used. +Good documentation often comes with an example of how a specific function or class should be used. Each model class should contain at least one example showcasing how to use this model class in inference. *E.g.* the class [Wav2Vec2ForCTC](https://huggingface.co/docs/transformers/model_doc/wav2vec2#transformers.Wav2Vec2ForCTC) includes an example of how to transcribe speech to text in the [docstring of its forward function](https://huggingface.co/docs/transformers/model_doc/wav2vec2#transformers.Wav2Vec2ForCTC.forward). -## Writing documenation examples +## Writing documentation examples The syntax for Example docstrings can look as follows: From 3b0cecb62789a8951858ebe57e08ba77f7a96922 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Mon, 19 Sep 2022 15:27:18 -0400 Subject: [PATCH 338/539] Don't warn of move if cache is empty (#19109) --- src/transformers/utils/hub.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py index de6fab51b8f1cd..cd4c92e50b3774 100644 --- a/src/transformers/utils/hub.py +++ b/src/transformers/utils/hub.py @@ -1104,8 +1104,9 @@ def move_cache(cache_dir=None, new_cache_dir=None, token=None): with open(cache_version_file) as f: cache_version = int(f.read()) +cache_is_not_empty = os.path.isdir(TRANSFORMERS_CACHE) and len(os.listdir(TRANSFORMERS_CACHE)) > 0 -if cache_version < 1: +if cache_version < 1 and cache_is_not_empty: if is_offline_mode(): logger.warning( "You are offline and the cache for model files in Transformers v4.22.0 has been updated while your local " From 6227078d0a95aed688578d37b319e969a1dcd30f Mon Sep 17 00:00:00 2001 From: "Wang, Yi" Date: Tue, 20 Sep 2022 04:42:18 +0800 Subject: [PATCH 339/539] HPO: keep the original logic if there's only one process, pass the trial to trainer (#19096) need to find out solution for following cases *if we need to use trial in model_init, how to do it for non-main rank, sync the model with rank0 in app? *how to use optuna prune feature for DDP, if we do it in rank0, how does other rank know it. Signed-off-by: Wang, Yi A Signed-off-by: Wang, Yi A --- src/transformers/integrations.py | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/src/transformers/integrations.py b/src/transformers/integrations.py index b40dc8f42d9d5e..787f76672a174a 100644 --- a/src/transformers/integrations.py +++ b/src/transformers/integrations.py @@ -168,12 +168,14 @@ def _objective(trial, checkpoint_dir=None): if subdir.startswith(PREFIX_CHECKPOINT_DIR): checkpoint = os.path.join(checkpoint_dir, subdir) trainer.objective = None - trainer._hp_search_setup(trial) if trainer.args.world_size > 1: if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: raise RuntimeError("only support DDP optuna HPO for ParallelMode.DISTRIBUTED currently.") + trainer._hp_search_setup(trial) torch.distributed.broadcast_object_list(pickle.dumps(trainer.args), src=0) - trainer.train(resume_from_checkpoint=checkpoint) + trainer.train(resume_from_checkpoint=checkpoint) + else: + trainer.train(resume_from_checkpoint=checkpoint, trial=trial) # If there hasn't been any evaluation during the training loop. if getattr(trainer, "objective", None) is None: metrics = trainer.evaluate() @@ -362,12 +364,14 @@ def run_hp_search_sigopt(trainer, n_trials: int, direction: str, **kwargs) -> Be for run in experiment.loop(): with run: trainer.objective = None - trainer._hp_search_setup(run.run) if trainer.args.world_size > 1: if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: raise RuntimeError("only support DDP Sigopt HPO for ParallelMode.DISTRIBUTED currently.") + trainer._hp_search_setup(run.run) torch.distributed.broadcast_object_list(pickle.dumps(trainer.args), src=0) - trainer.train(resume_from_checkpoint=None) + trainer.train(resume_from_checkpoint=None) + else: + trainer.train(resume_from_checkpoint=None, trial=run.run) # If there hasn't been any evaluation during the training loop. if getattr(trainer, "objective", None) is None: metrics = trainer.evaluate() @@ -397,12 +401,14 @@ def run_hp_search_sigopt(trainer, n_trials: int, direction: str, **kwargs) -> Be while experiment.progress.observation_count < experiment.observation_budget: suggestion = conn.experiments(experiment.id).suggestions().create() trainer.objective = None - trainer._hp_search_setup(suggestion) if trainer.args.world_size > 1: if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: raise RuntimeError("only support DDP Sigopt HPO for ParallelMode.DISTRIBUTED currently.") + trainer._hp_search_setup(suggestion) torch.distributed.broadcast_object_list(pickle.dumps(trainer.args), src=0) - trainer.train(resume_from_checkpoint=None) + trainer.train(resume_from_checkpoint=None) + else: + trainer.train(resume_from_checkpoint=None, trial=suggestion) # If there hasn't been any evaluation during the training loop. if getattr(trainer, "objective", None) is None: metrics = trainer.evaluate() From 801ebd045d4310fef2e837713fa630cb183f0104 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Mon, 19 Sep 2022 16:55:50 -0400 Subject: [PATCH 340/539] Add documentation of Trainer.create_model_card (#19110) * Add documentation of Trainer.create_model_card * Expand to TF version --- src/transformers/modeling_tf_utils.py | 27 +++++++++++++++++++++ src/transformers/trainer.py | 35 +++++++++++++++++++++++---- 2 files changed, 57 insertions(+), 5 deletions(-) diff --git a/src/transformers/modeling_tf_utils.py b/src/transformers/modeling_tf_utils.py index af4eab59087e27..d06c619cdf6a16 100644 --- a/src/transformers/modeling_tf_utils.py +++ b/src/transformers/modeling_tf_utils.py @@ -1601,6 +1601,33 @@ def create_model_card( dataset: Optional[Union[str, List[str]]] = None, dataset_args: Optional[Union[str, List[str]]] = None, ): + """ + Creates a draft of a model card using the information available to the `Trainer`. + + Args: + output_dir (`str` or `os.PathLike`): + The folder in which to create the model card. + model_name (`str`, *optional*): + The name of the model. + language (`str`, *optional*): + The language of the model (if applicable) + license (`str`, *optional*): + The license of the model. Will default to the license of the pretrained model used, if the original + model given to the `Trainer` comes from a repo on the Hub. + tags (`str` or `List[str]`, *optional*): + Some tags to be included in the metadata of the model card. + finetuned_from (`str`, *optional*): + The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo + of the original model given to the `Trainer` (if it comes from the Hub). + tasks (`str` or `List[str]`, *optional*): + One or several task identifiers, to be included in the metadata of the model card. + dataset_tags (`str` or `List[str]`, *optional*): + One or several dataset tags, to be included in the metadata of the model card. + dataset (`str` or `List[str]`, *optional*): + One or several dataset identifiers, to be included in the metadata of the model card. + dataset_args (`str` or `List[str]`, *optional*): + One or several dataset arguments, to be included in the metadata of the model card. + """ # Avoids a circular import by doing this when necessary. from .modelcard import TrainingSummary # tests_ignore diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index 6cae5a6ea0069f..c1869ef76f0055 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -3284,14 +3284,39 @@ def create_model_card( self, language: Optional[str] = None, license: Optional[str] = None, - tags: Optional[str] = None, + tags: Union[str, List[str], None] = None, model_name: Optional[str] = None, finetuned_from: Optional[str] = None, - tasks: Optional[str] = None, - dataset_tags: Optional[Union[str, List[str]]] = None, - dataset: Optional[Union[str, List[str]]] = None, - dataset_args: Optional[Union[str, List[str]]] = None, + tasks: Union[str, List[str], None] = None, + dataset_tags: Union[str, List[str], None] = None, + dataset: Union[str, List[str], None] = None, + dataset_args: Union[str, List[str], None] = None, ): + """ + Creates a draft of a model card using the information available to the `Trainer`. + + Args: + language (`str`, *optional*): + The language of the model (if applicable) + license (`str`, *optional*): + The license of the model. Will default to the license of the pretrained model used, if the original + model given to the `Trainer` comes from a repo on the Hub. + tags (`str` or `List[str]`, *optional*): + Some tags to be included in the metadata of the model card. + model_name (`str`, *optional*): + The name of the model. + finetuned_from (`str`, *optional*): + The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo + of the original model given to the `Trainer` (if it comes from the Hub). + tasks (`str` or `List[str]`, *optional*): + One or several task identifiers, to be included in the metadata of the model card. + dataset_tags (`str` or `List[str]`, *optional*): + One or several dataset tags, to be included in the metadata of the model card. + dataset (`str` or `List[str]`, *optional*): + One or several dataset identifiers, to be included in the metadata of the model card. + dataset_args (`str` or `List[str]`, *optional*): + One or several dataset arguments, to be included in the metadata of the model card. + """ if not self.is_world_process_zero(): return From 261301d38807c509152ca9d1fd2a57d573b35497 Mon Sep 17 00:00:00 2001 From: "S.Kishore" Date: Tue, 20 Sep 2022 03:34:25 +0530 Subject: [PATCH 341/539] Added type hints for YolosForObjectDetection (#19086) --- src/transformers/models/yolos/modeling_yolos.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/transformers/models/yolos/modeling_yolos.py b/src/transformers/models/yolos/modeling_yolos.py index d41e26c4e21c52..d2b8132cc8172a 100755 --- a/src/transformers/models/yolos/modeling_yolos.py +++ b/src/transformers/models/yolos/modeling_yolos.py @@ -737,12 +737,12 @@ def _set_aux_loss(self, outputs_class, outputs_coord): @replace_return_docstrings(output_type=YolosObjectDetectionOutput, config_class=_CONFIG_FOR_DOC) def forward( self, - pixel_values, - labels=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - ): + pixel_values: torch.FloatTensor, + labels: Optional[List[Dict]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, YolosObjectDetectionOutput]: r""" labels (`List[Dict]` of len `(batch_size,)`, *optional*): Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the From c81ebd1c39e3e1bc017a3affbba096dc9aedb5a0 Mon Sep 17 00:00:00 2001 From: Alara Dirik <8944735+alaradirik@users.noreply.github.com> Date: Tue, 20 Sep 2022 10:41:56 +0300 Subject: [PATCH 342/539] Beit postprocessing (#19099) * add post_process_semantic_segmentation method to BeiTFeatureExtractor --- docs/source/en/model_doc/beit.mdx | 1 + .../models/beit/feature_extraction_beit.py | 48 ++++++++++++++++++- 2 files changed, 47 insertions(+), 2 deletions(-) diff --git a/docs/source/en/model_doc/beit.mdx b/docs/source/en/model_doc/beit.mdx index f8177443d13a23..689eadc70a73c4 100644 --- a/docs/source/en/model_doc/beit.mdx +++ b/docs/source/en/model_doc/beit.mdx @@ -82,6 +82,7 @@ contributed by [kamalkraj](https://huggingface.co/kamalkraj). The original code [[autodoc]] BeitFeatureExtractor - __call__ + - post_process_semantic_segmentation ## BeitModel diff --git a/src/transformers/models/beit/feature_extraction_beit.py b/src/transformers/models/beit/feature_extraction_beit.py index 62b790621baf3e..eac1ba8e32a241 100644 --- a/src/transformers/models/beit/feature_extraction_beit.py +++ b/src/transformers/models/beit/feature_extraction_beit.py @@ -14,7 +14,7 @@ # limitations under the License. """Feature extractor class for BEiT.""" -from typing import Optional, Union +from typing import List, Optional, Tuple, Union import numpy as np from PIL import Image @@ -27,9 +27,12 @@ ImageInput, is_torch_tensor, ) -from ...utils import TensorType, logging +from ...utils import TensorType, is_torch_available, logging +if is_torch_available(): + import torch + logger = logging.get_logger(__name__) @@ -222,3 +225,44 @@ def __call__( encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) return encoded_inputs + + def post_process_semantic_segmentation(self, outputs, target_sizes: Union[TensorType, List[Tuple]] = None): + """ + Converts the output of [`BeitForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch. + + Args: + outputs ([`BeitForSemanticSegmentation`]): + Raw outputs of the model. + target_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`, *optional*): + Torch Tensor (or list) corresponding to the requested final size (h, w) of each prediction. If left to + None, predictions will not be resized. + Returns: + semantic_segmentation: `torch.Tensor` of shape `(batch_size, 2)` or `List[torch.Tensor]` of length + `batch_size`, where each item is a semantic segmentation map of of the corresponding target_sizes entry (if + `target_sizes` is specified). Each entry of each `torch.Tensor` correspond to a semantic class id. + """ + logits = outputs.logits + + if len(logits) != len(target_sizes): + raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits") + + if target_sizes is not None and target_sizes.shape[1] != 2: + raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch") + + semantic_segmentation = logits.argmax(dim=1) + + # Resize semantic segmentation maps + if target_sizes is not None: + if is_torch_tensor(target_sizes): + target_sizes = target_sizes.numpy() + + resized_maps = [] + semantic_segmentation = semantic_segmentation.numpy() + + for idx in range(len(semantic_segmentation)): + resized = self.resize(image=semantic_segmentation[idx], size=target_sizes[idx]) + resized_maps.append(resized) + + semantic_segmentation = [torch.Tensor(np.array(image)) for image in resized_maps] + + return semantic_segmentation From cc567e0063e8c67a3fcc22963a94ed71b2aade5f Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Tue, 20 Sep 2022 13:46:55 +0200 Subject: [PATCH 343/539] Fix the wrong schedule (#19117) Co-authored-by: ydshieh --- .github/workflows/check_runner_status.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/check_runner_status.yml b/.github/workflows/check_runner_status.yml index fa40a08cc0db0b..2b61bfd971b2f7 100644 --- a/.github/workflows/check_runner_status.yml +++ b/.github/workflows/check_runner_status.yml @@ -10,7 +10,7 @@ on: repository_dispatch: schedule: # run per hour - - cron: "* */1 * * *" + - cron: "0 */1 * * *" env: TRANSFORMERS_IS_CI: yes From 67403413bd8f8e00759a9cffe8608e092fa7b519 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Tue, 20 Sep 2022 06:17:57 -0700 Subject: [PATCH 344/539] Change document question answering pipeline to always return an array (#19071) Co-authored-by: Ankur Goyal --- src/transformers/pipelines/document_question_answering.py | 2 -- tests/pipelines/test_pipelines_document_question_answering.py | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py index b0fe18cb9dd6c2..a6afc069fb9fc7 100644 --- a/src/transformers/pipelines/document_question_answering.py +++ b/src/transformers/pipelines/document_question_answering.py @@ -383,8 +383,6 @@ def postprocess(self, model_outputs, top_k=1, **kwargs): answers = self.postprocess_extractive_qa(model_outputs, top_k=top_k, **kwargs) answers = sorted(answers, key=lambda x: x.get("score", 0), reverse=True)[:top_k] - if len(answers) == 1: - return answers[0] return answers def postprocess_donut(self, model_outputs, **kwargs): diff --git a/tests/pipelines/test_pipelines_document_question_answering.py b/tests/pipelines/test_pipelines_document_question_answering.py index 091f6c3c03b14a..92d618bfd67104 100644 --- a/tests/pipelines/test_pipelines_document_question_answering.py +++ b/tests/pipelines/test_pipelines_document_question_answering.py @@ -267,7 +267,7 @@ def test_large_model_pt_donut(self): image = INVOICE_URL question = "What is the invoice number?" outputs = dqa_pipeline(image=image, question=question, top_k=2) - self.assertEqual(nested_simplify(outputs, decimals=4), {"answer": "us-001"}) + self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}]) @require_tf @unittest.skip("Document question answering not implemented in TF") From de262416456dfbd66697b4be86b55c9f10804fa7 Mon Sep 17 00:00:00 2001 From: flozi00 Date: Tue, 20 Sep 2022 15:18:21 +0200 Subject: [PATCH 345/539] german processing (#19121) * correct spelling in README * processing --- docs/source/de/_toctree.yml | 2 + docs/source/de/preprocessing.mdx | 502 +++++++++++++++++++++++++++++++ 2 files changed, 504 insertions(+) create mode 100644 docs/source/de/preprocessing.mdx diff --git a/docs/source/de/_toctree.yml b/docs/source/de/_toctree.yml index 98b56cfa307628..7028e98ced540f 100644 --- a/docs/source/de/_toctree.yml +++ b/docs/source/de/_toctree.yml @@ -11,4 +11,6 @@ title: Pipelines für Inferenzen - local: autoclass_tutorial title: Laden von vortrainierten Instanzen mit einer AutoClass + - local: preprocessing + title: Vorverarbeiten title: Tutorials diff --git a/docs/source/de/preprocessing.mdx b/docs/source/de/preprocessing.mdx new file mode 100644 index 00000000000000..ea6c185cc10155 --- /dev/null +++ b/docs/source/de/preprocessing.mdx @@ -0,0 +1,502 @@ + + +# Vorverarbeiten + +[[open-in-colab]] + +Bevor Sie Ihre Daten in einem Modell verwenden können, müssen die Daten in ein für das Modell akzeptables Format gebracht werden. Ein Modell versteht keine Rohtexte, Bilder oder Audiodaten. Diese Eingaben müssen in Zahlen umgewandelt und zu Tensoren zusammengesetzt werden. In dieser Anleitung werden Sie: + +* Textdaten mit einem Tokenizer vorverarbeiten. +* Bild- oder Audiodaten mit einem Feature Extractor vorverarbeiten. +* Daten für eine multimodale Aufgabe mit einem Prozessor vorverarbeiten. + +## NLP + + + +Das wichtigste Werkzeug zur Verarbeitung von Textdaten ist ein [Tokenizer](main_classes/tokenizer). Ein Tokenizer zerlegt Text zunächst nach einer Reihe von Regeln in *Token*. Die Token werden in Zahlen umgewandelt, die zum Aufbau von Tensoren als Eingabe für ein Modell verwendet werden. Alle zusätzlichen Eingaben, die ein Modell benötigt, werden ebenfalls vom Tokenizer hinzugefügt. + + + +Wenn Sie ein vortrainiertes Modell verwenden möchten, ist es wichtig, den zugehörigen vortrainierten Tokenizer zu verwenden. Dadurch wird sichergestellt, dass der Text auf die gleiche Weise aufgeteilt wird wie das Pretraining-Korpus und die gleichen entsprechenden Token-zu-Index (in der Regel als *vocab* bezeichnet) während des Pretrainings verwendet werden. + + + +Laden Sie einen vortrainierten Tokenizer mit der Klasse [AutoTokenizer], um schnell loszulegen. Damit wird das *vocab* heruntergeladen, das verwendet wird, wenn ein Modell vortrainiert wird. + +### Tokenize + +Laden Sie einen vortrainierten Tokenizer mit [`AutoTokenizer.from_pretrained`]: + +```py +>>> from transformers import AutoTokenizer + +>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") +``` + +Dann übergeben Sie Ihren Satz an den Tokenizer: + +```py +>>> encoded_input = tokenizer("Do not meddle in the affairs of wizards, for they are subtle and quick to anger.") +>>> print(encoded_input) +{'input_ids': [101, 2079, 2025, 19960, 10362, 1999, 1996, 3821, 1997, 16657, 1010, 2005, 2027, 2024, 11259, 1998, 4248, 2000, 4963, 1012, 102], + 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} +``` + +Der Tokenizer gibt ein Wörterbuch mit drei wichtigen Elementen zurück: + +* [input_ids](glossary#input-ids) sind die Indizes, die den einzelnen Token im Satz entsprechen. +* [attention_mask](glossary#attention-mask) gibt an, ob ein Token beachtet werden soll oder nicht. +* [token_type_ids](glossary#token-type-ids) gibt an, zu welcher Sequenz ein Token gehört, wenn es mehr als eine Sequenz gibt. + +Sie können die `input_ids` dekodieren, um die ursprüngliche Eingabe zurückzugeben: + +```py +>>> tokenizer.decode(encoded_input["input_ids"]) +'[CLS] Do not meddle in the affairs of wizards, for they are subtle and quick to anger. [SEP]' +``` + +Wie Sie sehen können, hat der Tokenisierer zwei spezielle Token - `CLS` und `SEP` (Klassifikator und Separator) - zum Satz hinzugefügt. Nicht alle Modelle benötigen +spezielle Token, aber wenn dies der Fall ist, fügt der Tokenisierer sie automatisch für Sie hinzu. + +Wenn Sie mehrere Sätze verarbeiten wollen, übergeben Sie die Sätze als Liste an den Tokenizer: + +```py +>>> batch_sentences = [ +... "But what about second breakfast?", +... "Don't think he knows about second breakfast, Pip.", +... "What about elevensies?", +... ] +>>> encoded_inputs = tokenizer(batch_sentences) +>>> print(encoded_inputs) +{'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102], + [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], + [101, 1327, 1164, 5450, 23434, 136, 102]], + 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0]], + 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1]]} +``` + +### Pad + +Dies bringt uns zu einem wichtigen Thema. Wenn Sie einen Haufen von Sätzen verarbeiten, sind diese nicht immer gleich lang. Das ist ein Problem, weil Tensoren, die Eingabe für das Modell, eine einheitliche Form haben müssen. Padding ist eine Strategie, die sicherstellt, dass Tensoren rechteckig sind, indem ein spezielles *Padding-Token* zu Sätzen mit weniger Token hinzugefügt wird. + +Setzen Sie den Parameter "padding" auf "true", um die kürzeren Sequenzen im Stapel so aufzufüllen, dass sie der längsten Sequenz entsprechen: + +```py +>>> batch_sentences = [ +... "But what about second breakfast?", +... "Don't think he knows about second breakfast, Pip.", +... "What about elevensies?", +... ] +>>> encoded_input = tokenizer(batch_sentences, padding=True) +>>> print(encoded_input) +{'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], + [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], + [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], + 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], + 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]]} +``` + +Beachten Sie, dass der Tokenizer den ersten und den dritten Satz mit einer "0" aufgefüllt hat, weil sie kürzer sind! + +### Kürzung + +Auf der anderen Seite des Spektrums kann es vorkommen, dass eine Sequenz zu lang für ein Modell ist. In diesem Fall müssen Sie die Sequenz auf eine kürzere Länge kürzen. + +Setzen Sie den Parameter "truncation" auf "true", um eine Sequenz auf die vom Modell akzeptierte Höchstlänge zu kürzen: + +```py +>>> batch_sentences = [ +... "But what about second breakfast?", +... "Don't think he knows about second breakfast, Pip.", +... "What about elevensies?", +... ] +>>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True) +>>> print(encoded_input) +{'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], + [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], + [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], + 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], + 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]]} +``` + +### Tensoren erstellen + +Schließlich möchten Sie, dass der Tokenizer die tatsächlichen Tensoren zurückgibt, die dem Modell zugeführt werden. + +Setzen Sie den Parameter `return_tensors` entweder auf `pt` für PyTorch, oder `tf` für TensorFlow: + + + + +```py +>>> batch_sentences = [ +... "But what about second breakfast?", +... "Don't think he knows about second breakfast, Pip.", +... "What about elevensies?", +... ] +>>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True, return_tensors="pt") +>>> print(encoded_input) +{'input_ids': tensor([[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0], + [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102], + [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]]), + 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]), + 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]])} +``` + + +```py +>>> batch_sentences = [ +... "But what about second breakfast?", +... "Don't think he knows about second breakfast, Pip.", +... "What about elevensies?", +... ] +>>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True, return_tensors="tf") +>>> print(encoded_input) +{'input_ids': , + 'token_type_ids': , + 'attention_mask': } +``` + + + +## Audio + +Audioeingaben werden anders vorverarbeitet als Texteingaben, aber das Endziel bleibt dasselbe: numerische Sequenzen zu erstellen, die das Modell verstehen kann. Ein [feature extractor](main_classes/feature_extractor) dient dem ausdrücklichen Zweck, Merkmale aus Rohbild- oder Audiodaten zu extrahieren und in Tensoren zu konvertieren. Bevor Sie beginnen, installieren Sie 🤗 Datasets, um einen Audio-Datensatz zu laden, mit dem Sie experimentieren können: + +```bash +pip install datasets +``` + +Laden Sie den [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) Datensatz (weitere Informationen zum Laden eines Datensatzes finden Sie im 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub.html)): + +```py +>>> from datasets import load_dataset, Audio + +>>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") +``` + +Greifen Sie auf das erste Element der `audio`-Spalte zu, um einen Blick auf die Eingabe zu werfen. Durch den Aufruf der Spalte "audio" wird die Audiodatei automatisch geladen und neu gesampelt: + +```py +>>> dataset[0]["audio"] +{'array': array([ 0. , 0.00024414, -0.00024414, ..., -0.00024414, + 0. , 0. ], dtype=float32), + 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', + 'sampling_rate': 8000} +``` + +Dies gibt drei Elemente zurück: + +* "array" ist das Sprachsignal, das als 1D-Array geladen - und möglicherweise neu gesampelt - wurde. +* Pfad" zeigt auf den Speicherort der Audiodatei. +* `sampling_rate` bezieht sich darauf, wie viele Datenpunkte im Sprachsignal pro Sekunde gemessen werden. + +### Resample + +Für dieses Tutorial werden Sie das Modell [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base) verwenden. Wie Sie aus der Modellkarte ersehen können, ist das Wav2Vec2-Modell auf 16kHz abgetastetes Sprachaudio vortrainiert. Es ist wichtig, dass die Abtastrate Ihrer Audiodaten mit der Abtastrate des Datensatzes übereinstimmt, der für das Pre-Training des Modells verwendet wurde. Wenn die Abtastrate Ihrer Daten nicht dieselbe ist, müssen Sie Ihre Audiodaten neu abtasten. + +Der Datensatz [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) hat zum Beispiel eine Abtastrate von 8000 kHz. Um das Wav2Vec2-Modell mit diesem Datensatz verwenden zu können, müssen Sie die Abtastrate auf 16 kHz erhöhen: + +```py +>>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") +>>> dataset[0]["audio"] +{'array': array([ 0. , 0.00024414, -0.00024414, ..., -0.00024414, + 0. , 0. ], dtype=float32), + 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', + 'sampling_rate': 8000} +``` + +1. Verwenden Sie die Methode [~datasets.Dataset.cast_column] von 🤗 Datasets, um die Abtastrate auf 16kHz zu erhöhen: + +```py +>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000)) +``` + +2. Laden Sie die Audiodatei: + +```py +>>> dataset[0]["audio"] +{'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ..., + 3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32), + 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', + 'sampling_rate': 16000} +``` + +Wie Sie sehen können, ist die Abtastrate jetzt 16kHz! + +### Merkmalsextraktor + +Der nächste Schritt ist das Laden eines Merkmalsextraktors, um die Eingabe zu normalisieren und aufzufüllen. Beim Auffüllen von Textdaten wird für kürzere Sequenzen ein `0` hinzugefügt. Die gleiche Idee gilt für Audiodaten, und der Audio-Feature-Extraktor fügt eine `0` - interpretiert als Stille - zu `array` hinzu. + +Laden Sie den Merkmalsextraktor mit [`AutoFeatureExtractor.from_pretrained`]: + +```py +>>> from transformers import AutoFeatureExtractor + +>>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base") +``` + +Übergeben Sie das Audio-"Array" an den Feature-Extraktor. Wir empfehlen auch, das Argument `sampling_rate` im Feature Extractor hinzuzufügen, um eventuell auftretende stille Fehler besser zu beheben. + +```py +>>> audio_input = [dataset[0]["audio"]["array"]] +>>> feature_extractor(audio_input, sampling_rate=16000) +{'input_values': [array([ 3.8106556e-04, 2.7506407e-03, 2.8015103e-03, ..., + 5.6335266e-04, 4.6588284e-06, -1.7142107e-04], dtype=float32)]} +``` + +### Auffüllen und Kürzen + +Genau wie beim Tokenizer können Sie variable Sequenzen in einem Stapel durch Auffüllen oder Abschneiden behandeln. Werfen Sie einen Blick auf die Sequenzlänge dieser beiden Audiobeispiele: + +```py +>>> dataset[0]["audio"]["array"].shape +(173398,) + +>>> dataset[1]["audio"]["array"].shape +(106496,) +``` + +Wie Sie sehen können, hat das erste Beispiel eine längere Sequenz als das zweite Beispiel. Lassen Sie uns eine Funktion erstellen, die den Datensatz vorverarbeitet. Geben Sie eine maximale Länge der Probe an, und der Feature-Extraktor wird die Sequenzen entweder auffüllen oder abschneiden, damit sie dieser Länge entsprechen: + +```py +>>> def preprocess_function(examples): +... audio_arrays = [x["array"] for x in examples["audio"]] +... inputs = feature_extractor( +... audio_arrays, +... sampling_rate=16000, +... padding=True, +... max_length=100000, +... truncation=True, +... ) +... return inputs +``` + +Wenden Sie die Funktion auf die ersten paar Beispiele im Datensatz an: + +```py +>>> processed_dataset = preprocess_function(dataset[:5]) +``` + +Schauen Sie sich nun noch einmal die verarbeiteten Beispiel-Längen an: + +```py +>>> processed_dataset["input_values"][0].shape +(100000,) + +>>> processed_dataset["input_values"][1].shape +(100000,) +``` + +Die Länge der ersten beiden Beispiele entspricht nun der von Ihnen angegebenen Maximallänge. + +## Bildverarbeitung + +Ein Merkmalsextraktor wird auch verwendet, um Bilder für Bildverarbeitungsaufgaben zu verarbeiten. Auch hier besteht das Ziel darin, das Rohbild in eine Reihe von Tensoren als Eingabe zu konvertieren. + +Laden wir den [food101](https://huggingface.co/datasets/food101) Datensatz für dieses Tutorial. Verwenden Sie den Parameter 🤗 Datasets `split`, um nur eine kleine Stichprobe aus dem Trainingssplit zu laden, da der Datensatz recht groß ist: + +```py +>>> from datasets import load_dataset + +>>> dataset = load_dataset("food101", split="train[:100]") +``` + +Als Nächstes sehen Sie sich das Bild mit dem Merkmal 🤗 Datensätze [Bild] (https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=image#datasets.Image) an: + +```py +>>> dataset[0]["image"] +``` + +![vision-preprocess-tutorial.png](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/vision-preprocess-tutorial.png) + +### Merkmalsextraktor + +Laden Sie den Merkmalsextraktor mit [`AutoFeatureExtractor.from_pretrained`]: + +```py +>>> from transformers import AutoFeatureExtractor + +>>> feature_extractor = AutoFeatureExtractor.from_pretrained("google/vit-base-patch16-224") +``` + +### Datenerweiterung + +Bei Bildverarbeitungsaufgaben ist es üblich, den Bildern als Teil der Vorverarbeitung eine Art von Datenerweiterung hinzuzufügen. Sie können Erweiterungen mit jeder beliebigen Bibliothek hinzufügen, aber in diesem Tutorial werden Sie das Modul [`transforms`](https://pytorch.org/vision/stable/transforms.html) von torchvision verwenden. + +1. Normalisieren Sie das Bild und verwenden Sie [`Compose`](https://pytorch.org/vision/master/generated/torchvision.transforms.Compose.html), um einige Transformationen - [`RandomResizedCrop`](https://pytorch.org/vision/main/generated/torchvision.transforms.RandomResizedCrop.html) und [`ColorJitter`](https://pytorch.org/vision/main/generated/torchvision.transforms.ColorJitter.html) - miteinander zu verknüpfen: + +```py +>>> from torchvision.transforms import Compose, Normalize, RandomResizedCrop, ColorJitter, ToTensor + +>>> normalize = Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std) +>>> _transforms = Compose( +... [RandomResizedCrop(feature_extractor.size), ColorJitter(brightness=0.5, hue=0.5), ToTensor(), normalize] +... ) +``` + +2. Das Modell akzeptiert [`pixel_values`](model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderModel.forward.pixel_values) als Eingabe. Dieser Wert wird vom Merkmalsextraktor erzeugt. Erstellen Sie eine Funktion, die `pixel_values` aus den Transformationen erzeugt: + +```py +>>> def transforms(examples): +... examples["pixel_values"] = [_transforms(image.convert("RGB")) for image in examples["image"]] +... return examples +``` + +3. Dann verwenden Sie 🤗 Datasets [`set_transform`](https://huggingface.co/docs/datasets/process.html#format-transform), um die Transformationen im laufenden Betrieb anzuwenden: + +```py +>>> dataset.set_transform(transforms) +``` + +4. Wenn Sie nun auf das Bild zugreifen, werden Sie feststellen, dass der Feature Extractor die Modelleingabe "pixel_values" hinzugefügt hat: + +```py +>>> dataset[0]["image"] +{'image': , + 'label': 6, + 'pixel_values': tensor([[[ 0.0353, 0.0745, 0.1216, ..., -0.9922, -0.9922, -0.9922], + [-0.0196, 0.0667, 0.1294, ..., -0.9765, -0.9843, -0.9922], + [ 0.0196, 0.0824, 0.1137, ..., -0.9765, -0.9686, -0.8667], + ..., + [ 0.0275, 0.0745, 0.0510, ..., -0.1137, -0.1216, -0.0824], + [ 0.0667, 0.0824, 0.0667, ..., -0.0588, -0.0745, -0.0980], + [ 0.0353, 0.0353, 0.0431, ..., -0.0039, -0.0039, -0.0588]], + + [[ 0.2078, 0.2471, 0.2863, ..., -0.9451, -0.9373, -0.9451], + [ 0.1608, 0.2471, 0.3098, ..., -0.9373, -0.9451, -0.9373], + [ 0.2078, 0.2706, 0.3020, ..., -0.9608, -0.9373, -0.8275], + ..., + [-0.0353, 0.0118, -0.0039, ..., -0.2392, -0.2471, -0.2078], + [ 0.0196, 0.0353, 0.0196, ..., -0.1843, -0.2000, -0.2235], + [-0.0118, -0.0039, -0.0039, ..., -0.0980, -0.0980, -0.1529]], + + [[ 0.3961, 0.4431, 0.4980, ..., -0.9216, -0.9137, -0.9216], + [ 0.3569, 0.4510, 0.5216, ..., -0.9059, -0.9137, -0.9137], + [ 0.4118, 0.4745, 0.5216, ..., -0.9137, -0.8902, -0.7804], + ..., + [-0.2314, -0.1922, -0.2078, ..., -0.4196, -0.4275, -0.3882], + [-0.1843, -0.1686, -0.2000, ..., -0.3647, -0.3804, -0.4039], + [-0.1922, -0.1922, -0.1922, ..., -0.2941, -0.2863, -0.3412]]])} +``` + +Hier sehen Sie, wie das Bild nach der Vorverarbeitung aussieht. Wie von den angewandten Transformationen zu erwarten, wurde das Bild willkürlich beschnitten und seine Farbeigenschaften sind anders. + +```py +>>> import numpy as np +>>> import matplotlib.pyplot as plt + +>>> img = dataset[0]["pixel_values"] +>>> plt.imshow(img.permute(1, 2, 0)) +``` + +![preprocessed_image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/preprocessed_image.png) + +## Multimodal + +Für multimodale Aufgaben werden Sie eine Kombination aus allem, was Sie bisher gelernt haben, verwenden und Ihre Fähigkeiten auf eine Aufgabe der automatischen Spracherkennung (ASR) anwenden. Dies bedeutet, dass Sie einen: + +* Feature Extractor zur Vorverarbeitung der Audiodaten. +* Tokenizer, um den Text zu verarbeiten. + +Kehren wir zum [LJ Speech](https://huggingface.co/datasets/lj_speech) Datensatz zurück: + +```py +>>> from datasets import load_dataset + +>>> lj_speech = load_dataset("lj_speech", split="train") +``` + +Da Sie hauptsächlich an den Spalten "Audio" und "Text" interessiert sind, entfernen Sie die anderen Spalten: + +```py +>>> lj_speech = lj_speech.map(remove_columns=["file", "id", "normalized_text"]) +``` + +Schauen Sie sich nun die Spalten "Audio" und "Text" an: + +```py +>>> lj_speech[0]["audio"] +{'array': array([-7.3242188e-04, -7.6293945e-04, -6.4086914e-04, ..., + 7.3242188e-04, 2.1362305e-04, 6.1035156e-05], dtype=float32), + 'path': '/root/.cache/huggingface/datasets/downloads/extracted/917ece08c95cf0c4115e45294e3cd0dee724a1165b7fc11798369308a465bd26/LJSpeech-1.1/wavs/LJ001-0001.wav', + 'sampling_rate': 22050} + +>>> lj_speech[0]["text"] +'Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition' +``` + +Erinnern Sie sich an den früheren Abschnitt über die Verarbeitung von Audiodaten: Sie sollten immer die Abtastrate Ihrer Audiodaten [resample](preprocessing#audio), damit sie mit der Abtastrate des Datensatzes übereinstimmt, der für das Vortraining eines Modells verwendet wird: + +```py +>>> lj_speech = lj_speech.cast_column("audio", Audio(sampling_rate=16_000)) +``` + +### Prozessor + +Ein Processor kombiniert einen Feature-Extraktor und einen Tokenizer. Laden Sie einen Processor mit [`AutoProcessor.from_pretrained]: + +```py +>>> from transformers import AutoProcessor + +>>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h") +``` + +1. Erstellen Sie eine Funktion, die die Audiodaten zu `input_values` verarbeitet und den Text zu `labels` tokenisiert. Dies sind Ihre Eingaben für das Modell: + +```py +>>> def prepare_dataset(example): +... audio = example["audio"] + +... example.update(processor(audio=audio["array"], text=example["text"], sampling_rate=16000)) + +... return example +``` + +2. Wenden Sie die Funktion "prepare_dataset" auf ein Beispiel an: + +```py +>>> prepare_dataset(lj_speech[0]) +``` + +Beachten Sie, dass der Processor `input_values` und `labels` hinzugefügt hat. Auch die Abtastrate wurde korrekt auf 16kHz heruntergerechnet. + +Toll, Sie sollten jetzt in der Lage sein, Daten für jede Modalität vorzuverarbeiten und sogar verschiedene Modalitäten zu kombinieren! Im nächsten Kurs lernen Sie, wie Sie ein Modell mit Ihren neu aufbereiteten Daten feinabstimmen können. From 36e356caa4b213f9d4d02236e6dafb78d8618c01 Mon Sep 17 00:00:00 2001 From: Ryan X Date: Tue, 20 Sep 2022 21:20:38 +0800 Subject: [PATCH 346/539] Fix: update ltp word segmentation call in mlm_wwm (#19047) * Fix: update ltp word segmentation call in mlm_wwm * Fix: update ltp word segmentation call in mlm_wwm * Fix: update ltp word segmentation call in mlm_wwm --- .../mlm_wwm/run_chinese_ref.py | 26 +++++++++++++++---- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/examples/research_projects/mlm_wwm/run_chinese_ref.py b/examples/research_projects/mlm_wwm/run_chinese_ref.py index 8c4250a3604f33..4d1c9e81e94ac3 100644 --- a/examples/research_projects/mlm_wwm/run_chinese_ref.py +++ b/examples/research_projects/mlm_wwm/run_chinese_ref.py @@ -79,7 +79,7 @@ def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokeni ltp_res = [] for i in range(0, len(lines), 100): - res = ltp_tokenizer.seg(lines[i : i + 100])[0] + res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws res = [get_chinese_word(r) for r in res] ltp_res.extend(res) assert len(ltp_res) == len(lines) @@ -92,7 +92,6 @@ def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokeni ref_ids = [] for input_ids, chinese_word in zip(bert_res, ltp_res): - input_tokens = [] for id in input_ids: token = bert_tokenizer._convert_id_to_token(id) @@ -133,15 +132,32 @@ def main(args): parser = argparse.ArgumentParser(description="prepare_chinese_ref") parser.add_argument( "--file_name", + required=False, type=str, default="./resources/chinese-demo.txt", help="file need process, same as training data in lm", ) parser.add_argument( - "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path" + "--ltp", + required=False, + type=str, + default="./resources/ltp", + help="resources for LTP tokenizer, usually a path", + ) + parser.add_argument( + "--bert", + required=False, + type=str, + default="./resources/robert", + help="resources for Bert tokenizer", + ) + parser.add_argument( + "--save_path", + required=False, + type=str, + default="./resources/ref.txt", + help="path to save res", ) - parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer") - parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res") args = parser.parse_args() main(args) From 36b9a99433b03d9caa8cae48c65348d165bec601 Mon Sep 17 00:00:00 2001 From: Alara Dirik <8944735+alaradirik@users.noreply.github.com> Date: Tue, 20 Sep 2022 18:53:40 +0300 Subject: [PATCH 347/539] Fix BeitFeatureExtractor postprocessing (#19119) * return post-processed segmentations as list, add test * use torch to resize logits * fix assertion error if no target_size is specified --- .../models/beit/feature_extraction_beit.py | 44 +++++++++---------- tests/models/beit/test_modeling_beit.py | 25 +++++++++++ 2 files changed, 47 insertions(+), 22 deletions(-) diff --git a/src/transformers/models/beit/feature_extraction_beit.py b/src/transformers/models/beit/feature_extraction_beit.py index eac1ba8e32a241..3e12e2d90d1a78 100644 --- a/src/transformers/models/beit/feature_extraction_beit.py +++ b/src/transformers/models/beit/feature_extraction_beit.py @@ -226,43 +226,43 @@ def __call__( return encoded_inputs - def post_process_semantic_segmentation(self, outputs, target_sizes: Union[TensorType, List[Tuple]] = None): + def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None): """ Converts the output of [`BeitForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`BeitForSemanticSegmentation`]): Raw outputs of the model. - target_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`, *optional*): - Torch Tensor (or list) corresponding to the requested final size (h, w) of each prediction. If left to + target_sizes (`List[Tuple]` of length `batch_size`, *optional*): + List of tuples corresponding to the requested final size (height, width) of each prediction. If left to None, predictions will not be resized. Returns: - semantic_segmentation: `torch.Tensor` of shape `(batch_size, 2)` or `List[torch.Tensor]` of length - `batch_size`, where each item is a semantic segmentation map of of the corresponding target_sizes entry (if - `target_sizes` is specified). Each entry of each `torch.Tensor` correspond to a semantic class id. + semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic + segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is + specified). Each entry of each `torch.Tensor` correspond to a semantic class id. """ logits = outputs.logits - if len(logits) != len(target_sizes): - raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits") - - if target_sizes is not None and target_sizes.shape[1] != 2: - raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch") - - semantic_segmentation = logits.argmax(dim=1) - - # Resize semantic segmentation maps + # Resize logits and compute semantic segmentation maps if target_sizes is not None: + if len(logits) != len(target_sizes): + raise ValueError( + "Make sure that you pass in as many target sizes as the batch dimension of the logits" + ) + if is_torch_tensor(target_sizes): target_sizes = target_sizes.numpy() - resized_maps = [] - semantic_segmentation = semantic_segmentation.numpy() + semantic_segmentation = [] - for idx in range(len(semantic_segmentation)): - resized = self.resize(image=semantic_segmentation[idx], size=target_sizes[idx]) - resized_maps.append(resized) - - semantic_segmentation = [torch.Tensor(np.array(image)) for image in resized_maps] + for idx in range(len(logits)): + resized_logits = torch.nn.functional.interpolate( + logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False + ) + semantic_map = resized_logits[0].argmax(dim=0) + semantic_segmentation.append(semantic_map) + else: + semantic_segmentation = logits.argmax(dim=1) + semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation diff --git a/tests/models/beit/test_modeling_beit.py b/tests/models/beit/test_modeling_beit.py index 7d2d75d2881b75..377ed8e8e94989 100644 --- a/tests/models/beit/test_modeling_beit.py +++ b/tests/models/beit/test_modeling_beit.py @@ -455,3 +455,28 @@ def test_inference_semantic_segmentation(self): ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4)) + + @slow + def test_post_processing_semantic_segmentation(self): + model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640") + model = model.to(torch_device) + + feature_extractor = BeitFeatureExtractor(do_resize=True, size=640, do_center_crop=False) + + ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") + image = Image.open(ds[0]["file"]) + inputs = feature_extractor(images=image, return_tensors="pt").to(torch_device) + + # forward pass + with torch.no_grad(): + outputs = model(**inputs) + + outputs.logits = outputs.logits.detach().cpu() + + segmentation = feature_extractor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)]) + expected_shape = torch.Size((500, 300)) + self.assertEqual(segmentation[0].shape, expected_shape) + + segmentation = feature_extractor.post_process_semantic_segmentation(outputs=outputs) + expected_shape = torch.Size((160, 160)) + self.assertEqual(segmentation[0].shape, expected_shape) From 06f341de4fa17f5b928c9b7143026248e8080ae4 Mon Sep 17 00:00:00 2001 From: Santiago Castro Date: Tue, 20 Sep 2022 15:43:32 -0400 Subject: [PATCH 348/539] Add a missing space in a script arg documentation (#19113) --- examples/pytorch/language-modeling/run_mlm.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/pytorch/language-modeling/run_mlm.py b/examples/pytorch/language-modeling/run_mlm.py index a1b1b91057f306..576fc3528febae 100755 --- a/examples/pytorch/language-modeling/run_mlm.py +++ b/examples/pytorch/language-modeling/run_mlm.py @@ -72,7 +72,7 @@ class ModelArguments: default=None, metadata={ "help": ( - "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." + "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch." ) }, ) From 18643ff29a946c4d21b67d288e6da98bb0c1b169 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Tue, 20 Sep 2022 21:52:18 +0200 Subject: [PATCH 349/539] Skip `test_export_to_onnx` for `LongT5` if `torch` < 1.11 (#19122) * Skip if torch < 1.11 * fix quality * fix import * fix typo * fix condition * fix condition * fix condition * fix quality * fix condition Co-authored-by: ydshieh --- tests/models/longt5/test_modeling_longt5.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/models/longt5/test_modeling_longt5.py b/tests/models/longt5/test_modeling_longt5.py index 61ad68921d9d93..bde8505640e2d5 100644 --- a/tests/models/longt5/test_modeling_longt5.py +++ b/tests/models/longt5/test_modeling_longt5.py @@ -39,6 +39,7 @@ LongT5Model, ) from transformers.models.longt5.modeling_longt5 import LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST + from transformers.pytorch_utils import is_torch_less_than_1_11 class LongT5ModelTester: @@ -584,6 +585,10 @@ def test_model_from_pretrained(self): model = LongT5Model.from_pretrained(model_name) self.assertIsNotNone(model) + @unittest.skipIf( + not is_torch_available() or is_torch_less_than_1_11, + "Test failed with torch < 1.11 with an exception in a C++ file.", + ) @slow def test_export_to_onnx(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() From ef6741fe65c130ddb33c43ad2ba2b82f40ea7e90 Mon Sep 17 00:00:00 2001 From: Leandro von Werra Date: Wed, 21 Sep 2022 11:33:22 +0400 Subject: [PATCH 350/539] Fix GLUE MNLI when using `max_eval_samples` (#18722) --- examples/pytorch/text-classification/run_glue.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/examples/pytorch/text-classification/run_glue.py b/examples/pytorch/text-classification/run_glue.py index f6ed830281e271..3f97fc3f5e1b44 100755 --- a/examples/pytorch/text-classification/run_glue.py +++ b/examples/pytorch/text-classification/run_glue.py @@ -549,7 +549,11 @@ def compute_metrics(p: EvalPrediction): eval_datasets = [eval_dataset] if data_args.task_name == "mnli": tasks.append("mnli-mm") - eval_datasets.append(raw_datasets["validation_mismatched"]) + valid_mm_dataset = raw_datasets["validation_mismatched"] + if data_args.max_eval_samples is not None: + max_eval_samples = min(len(valid_mm_dataset), data_args.max_eval_samples) + valid_mm_dataset = valid_mm_dataset.select(range(max_eval_samples)) + eval_datasets.append(valid_mm_dataset) combined = {} for eval_dataset, task in zip(eval_datasets, tasks): From 9e9570664898bdf82cb57461959639f1b78cef47 Mon Sep 17 00:00:00 2001 From: Alara Dirik <8944735+alaradirik@users.noreply.github.com> Date: Wed, 21 Sep 2022 11:40:35 +0300 Subject: [PATCH 351/539] Add post_process_semantic_segmentation method to SegFormer (#19072) * add post_process_semantic_segmentation method to SegformerFeatureExtractor * add test for semantic segmentation post-processing --- docs/source/en/model_doc/segformer.mdx | 1 + .../segformer/feature_extraction_segformer.py | 49 ++++++++++++++++++- .../segformer/test_modeling_segformer.py | 27 ++++++++++ 3 files changed, 75 insertions(+), 2 deletions(-) diff --git a/docs/source/en/model_doc/segformer.mdx b/docs/source/en/model_doc/segformer.mdx index b5c07f0d858cda..f16515068350bc 100644 --- a/docs/source/en/model_doc/segformer.mdx +++ b/docs/source/en/model_doc/segformer.mdx @@ -93,6 +93,7 @@ SegFormer's results on the segmentation datasets like ADE20k, refer to the [pape [[autodoc]] SegformerFeatureExtractor - __call__ + - post_process_semantic_segmentation ## SegformerModel diff --git a/src/transformers/models/segformer/feature_extraction_segformer.py b/src/transformers/models/segformer/feature_extraction_segformer.py index 0a9ae01ef121e5..d6bc7dde079dda 100644 --- a/src/transformers/models/segformer/feature_extraction_segformer.py +++ b/src/transformers/models/segformer/feature_extraction_segformer.py @@ -14,7 +14,7 @@ # limitations under the License. """Feature extractor class for SegFormer.""" -from typing import Optional, Union +from typing import List, Optional, Tuple, Union import numpy as np from PIL import Image @@ -27,9 +27,12 @@ ImageInput, is_torch_tensor, ) -from ...utils import TensorType, logging +from ...utils import TensorType, is_torch_available, logging +if is_torch_available(): + import torch + logger = logging.get_logger(__name__) @@ -211,3 +214,45 @@ def __call__( encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) return encoded_inputs + + def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None): + """ + Converts the output of [`SegformerForSemanticSegmentation`] into semantic segmentation maps. Only supports + PyTorch. + + Args: + outputs ([`SegformerForSemanticSegmentation`]): + Raw outputs of the model. + target_sizes (`List[Tuple]` of length `batch_size`, *optional*): + List of tuples corresponding to the requested final size (height, width) of each prediction. If left to + None, predictions will not be resized. + Returns: + semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic + segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is + specified). Each entry of each `torch.Tensor` correspond to a semantic class id. + """ + logits = outputs.logits + + # Resize logits and compute semantic segmentation maps + if target_sizes is not None: + if len(logits) != len(target_sizes): + raise ValueError( + "Make sure that you pass in as many target sizes as the batch dimension of the logits" + ) + + if is_torch_tensor(target_sizes): + target_sizes = target_sizes.numpy() + + semantic_segmentation = [] + + for idx in range(len(logits)): + resized_logits = torch.nn.functional.interpolate( + logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False + ) + semantic_map = resized_logits[0].argmax(dim=0) + semantic_segmentation.append(semantic_map) + else: + semantic_segmentation = logits.argmax(dim=1) + semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] + + return semantic_segmentation diff --git a/tests/models/segformer/test_modeling_segformer.py b/tests/models/segformer/test_modeling_segformer.py index 6a1d273f66426f..d921272b7995d6 100644 --- a/tests/models/segformer/test_modeling_segformer.py +++ b/tests/models/segformer/test_modeling_segformer.py @@ -395,3 +395,30 @@ def test_inference_image_segmentation_city(self): ] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1)) + + @slow + def test_post_processing_semantic_segmentation(self): + # only resize + normalize + feature_extractor = SegformerFeatureExtractor( + image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False + ) + model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to( + torch_device + ) + + image = prepare_img() + encoded_inputs = feature_extractor(images=image, return_tensors="pt") + pixel_values = encoded_inputs.pixel_values.to(torch_device) + + with torch.no_grad(): + outputs = model(pixel_values) + + outputs.logits = outputs.logits.detach().cpu() + + segmentation = feature_extractor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)]) + expected_shape = torch.Size((500, 300)) + self.assertEqual(segmentation[0].shape, expected_shape) + + segmentation = feature_extractor.post_process_semantic_segmentation(outputs=outputs) + expected_shape = torch.Size((128, 128)) + self.assertEqual(segmentation[0].shape, expected_shape) From da6a1b6ca1fc6fa99a1a8c14af5be6fc2b3b02f3 Mon Sep 17 00:00:00 2001 From: Zhong Hui Date: Wed, 21 Sep 2022 19:56:22 +0800 Subject: [PATCH 352/539] [BugFix] Fix fsdp option on shard_grad_op. (#19131) --- src/transformers/training_args.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index dd5e455bcfc4e7..623fa6246701f4 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -1197,7 +1197,7 @@ def __post_init__(self): "`--fsdp offload` can't work on its own. It needs to be added to `--fsdp full_shard` or " '`--fsdp shard_grad_op`. For example, `--fsdp "full_shard offload"`.' ) - elif FSDPOption.FULL_SHARD in self.fsdp and FSDPOption.SHARD_GRAD_OP in self.sharded_ddp: + elif FSDPOption.FULL_SHARD in self.fsdp and FSDPOption.SHARD_GRAD_OP in self.fsdp: raise ValueError("`--fsdp full_shard` is not compatible with `--fsdp shard_grad_op`.") if len(self.fsdp) == 0 and self.fsdp_min_num_params > 0: From e7fdfc720a60772f5ca59f0dedad9a21616dde75 Mon Sep 17 00:00:00 2001 From: Alara Dirik <8944735+alaradirik@users.noreply.github.com> Date: Wed, 21 Sep 2022 15:15:26 +0300 Subject: [PATCH 353/539] Add post_process_semantic_segmentation method to DPTFeatureExtractor (#19107) * add post-processing method for semantic segmentation * add test for post-processing --- docs/source/en/model_doc/dpt.mdx | 1 + .../models/dpt/feature_extraction_dpt.py | 49 +++++++++++++++++-- tests/models/dpt/test_modeling_dpt.py | 21 ++++++++ 3 files changed, 68 insertions(+), 3 deletions(-) diff --git a/docs/source/en/model_doc/dpt.mdx b/docs/source/en/model_doc/dpt.mdx index cdf009c6c8a071..fec18016615876 100644 --- a/docs/source/en/model_doc/dpt.mdx +++ b/docs/source/en/model_doc/dpt.mdx @@ -37,6 +37,7 @@ This model was contributed by [nielsr](https://huggingface.co/nielsr). The origi [[autodoc]] DPTFeatureExtractor - __call__ + - post_process_semantic_segmentation ## DPTModel diff --git a/src/transformers/models/dpt/feature_extraction_dpt.py b/src/transformers/models/dpt/feature_extraction_dpt.py index d4346b96f8d8e4..8f9f624a9b8f7b 100644 --- a/src/transformers/models/dpt/feature_extraction_dpt.py +++ b/src/transformers/models/dpt/feature_extraction_dpt.py @@ -14,13 +14,12 @@ # limitations under the License. """Feature extractor class for DPT.""" -from typing import Optional, Union +from typing import List, Optional, Tuple, Union import numpy as np from PIL import Image from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin -from ...file_utils import TensorType from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, @@ -28,9 +27,12 @@ ImageInput, is_torch_tensor, ) -from ...utils import logging +from ...utils import TensorType, is_torch_available, logging +if is_torch_available(): + import torch + logger = logging.get_logger(__name__) @@ -200,3 +202,44 @@ def __call__( encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) return encoded_inputs + + def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None): + """ + Converts the output of [`DPTForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch. + + Args: + outputs ([`DPTForSemanticSegmentation`]): + Raw outputs of the model. + target_sizes (`List[Tuple]` of length `batch_size`, *optional*): + List of tuples corresponding to the requested final size (height, width) of each prediction. If left to + None, predictions will not be resized. + Returns: + semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic + segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is + specified). Each entry of each `torch.Tensor` correspond to a semantic class id. + """ + logits = outputs.logits + + # Resize logits and compute semantic segmentation maps + if target_sizes is not None: + if len(logits) != len(target_sizes): + raise ValueError( + "Make sure that you pass in as many target sizes as the batch dimension of the logits" + ) + + if is_torch_tensor(target_sizes): + target_sizes = target_sizes.numpy() + + semantic_segmentation = [] + + for idx in range(len(logits)): + resized_logits = torch.nn.functional.interpolate( + logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False + ) + semantic_map = resized_logits[0].argmax(dim=0) + semantic_segmentation.append(semantic_map) + else: + semantic_segmentation = logits.argmax(dim=1) + semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] + + return semantic_segmentation diff --git a/tests/models/dpt/test_modeling_dpt.py b/tests/models/dpt/test_modeling_dpt.py index 3266ea78a71aaa..ef063f36179c3d 100644 --- a/tests/models/dpt/test_modeling_dpt.py +++ b/tests/models/dpt/test_modeling_dpt.py @@ -298,3 +298,24 @@ def test_inference_semantic_segmentation(self): ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)) + + def test_post_processing_semantic_segmentation(self): + feature_extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-large-ade") + model = DPTForSemanticSegmentation.from_pretrained("Intel/dpt-large-ade").to(torch_device) + + image = prepare_img() + inputs = feature_extractor(images=image, return_tensors="pt").to(torch_device) + + # forward pass + with torch.no_grad(): + outputs = model(**inputs) + + outputs.logits = outputs.logits.detach().cpu() + + segmentation = feature_extractor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)]) + expected_shape = torch.Size((500, 300)) + self.assertEqual(segmentation[0].shape, expected_shape) + + segmentation = feature_extractor.post_process_semantic_segmentation(outputs=outputs) + expected_shape = torch.Size((480, 480)) + self.assertEqual(segmentation[0].shape, expected_shape) From 486134e5a0db659a06d887f9c3c8aa6c2d25ec12 Mon Sep 17 00:00:00 2001 From: Mishig Davaadorj Date: Wed, 21 Sep 2022 14:17:04 +0200 Subject: [PATCH 354/539] Fix FlaxPretTrainedModel pt weights check (#19133) * Fix FlaxPretTrainedModel pt weights check * Update src/transformers/modeling_flax_utils.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * fix raise comment Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> --- src/transformers/modeling_flax_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/modeling_flax_utils.py b/src/transformers/modeling_flax_utils.py index 3299b543b7f218..7fa4b8e0948588 100644 --- a/src/transformers/modeling_flax_utils.py +++ b/src/transformers/modeling_flax_utils.py @@ -665,7 +665,7 @@ def from_pretrained( archive_file = os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_INDEX_NAME) is_sharded = True # At this stage we don't have a weight file so we will raise an error. - elif os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME): + elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)): raise EnvironmentError( f"Error no file named {FLAX_WEIGHTS_NAME} found in directory {pretrained_model_name_or_path} " "but there is a file for PyTorch weights. Use `from_pt=True` to load this model from those " From 114295c010dd9c94d48add7a0f091ba6ebdf482b Mon Sep 17 00:00:00 2001 From: Sylvain Gugger Date: Wed, 21 Sep 2022 09:37:53 -0400 Subject: [PATCH 355/539] Refuse Datasets 2.5.0 while waiting for a patch --- setup.py | 2 +- src/transformers/dependency_versions_table.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 27ab6efd69ef1e..536ece1c8e6290 100644 --- a/setup.py +++ b/setup.py @@ -102,7 +102,7 @@ "codecarbon==1.2.0", "cookiecutter==1.7.3", "dataclasses", - "datasets", + "datasets!=2.5.0", "deepspeed>=0.6.5", "dill<0.3.5", "evaluate>=0.2.0", diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py index 74c6d00c2a5885..e34b615edb23e4 100644 --- a/src/transformers/dependency_versions_table.py +++ b/src/transformers/dependency_versions_table.py @@ -8,7 +8,7 @@ "codecarbon": "codecarbon==1.2.0", "cookiecutter": "cookiecutter==1.7.3", "dataclasses": "dataclasses", - "datasets": "datasets", + "datasets": "datasets!=2.5.0", "deepspeed": "deepspeed>=0.6.5", "dill": "dill<0.3.5", "evaluate": "evaluate>=0.2.0", From 66154a6c87ebe33faffde2826fa395f68b0de3ee Mon Sep 17 00:00:00 2001 From: Quentin Lhoest <42851186+lhoestq@users.noreply.github.com> Date: Wed, 21 Sep 2022 16:15:31 +0200 Subject: [PATCH 356/539] suppoer deps from github (#19141) --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 536ece1c8e6290..547421c8b073fb 100644 --- a/setup.py +++ b/setup.py @@ -177,7 +177,7 @@ # packaging: "packaging" # # some of the values are versioned whereas others aren't. -deps = {b: a for a, b in (re.findall(r"^(([^!=<>~]+)(?:[!=<>~].*)?$)", x)[0] for x in _deps)} +deps = {b: a for a, b in (re.findall(r"^(([^!=<>~ ]+)(?:[!=<>~ ].*)?$)", x)[0] for x in _deps)} # since we save this data in src/transformers/dependency_versions_table.py it can be easily accessed from # anywhere. If you need to quickly access the data from this table in a shell, you can do so easily with: From 451df725d6a34b36be5d0c33c18cc98fb6cf9c31 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Wed, 21 Sep 2022 11:41:45 -0400 Subject: [PATCH 357/539] Fix dummy creation for multi-frameworks objects (#19144) --- utils/check_dummies.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/check_dummies.py b/utils/check_dummies.py index 484aac25452fad..6e03bfed8c5935 100644 --- a/utils/check_dummies.py +++ b/utils/check_dummies.py @@ -26,7 +26,7 @@ _re_backend = re.compile(r"is\_([a-z_]*)_available()") # Matches from xxx import bla _re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") -_re_test_backend = re.compile(r"^\s+if\s+not\s+is\_[a-z_]*\_available\(\)") +_re_test_backend = re.compile(r"^\s+if\s+not\s+\(?is\_[a-z_]*\_available\(\)") DUMMY_CONSTANT = """ From d5848a574a3990c95f20512673ecef9f57e0fe81 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Wed, 21 Sep 2022 17:46:04 +0200 Subject: [PATCH 358/539] Allowing users to use the latest `tokenizers` release ! (#19139) * Allowing users to use the latest `tokenizers` release ! * Upgrading the versions table too. --- setup.py | 2 +- src/transformers/dependency_versions_table.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 547421c8b073fb..4d7b4e85382621 100644 --- a/setup.py +++ b/setup.py @@ -160,7 +160,7 @@ "tf2onnx", "timeout-decorator", "timm", - "tokenizers>=0.11.1,!=0.11.3,<0.13", + "tokenizers>=0.11.1,!=0.11.3,<0.14", "torch>=1.7,!=1.12.0", "torchaudio", "pyctcdecode>=0.3.0", diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py index e34b615edb23e4..bfcb0fc8699b1e 100644 --- a/src/transformers/dependency_versions_table.py +++ b/src/transformers/dependency_versions_table.py @@ -66,7 +66,7 @@ "tf2onnx": "tf2onnx", "timeout-decorator": "timeout-decorator", "timm": "timm", - "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.13", + "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14", "torch": "torch>=1.7,!=1.12.0", "torchaudio": "torchaudio", "pyctcdecode": "pyctcdecode>=0.3.0", From 3c7b965bcd5745213134d6363e1879e0d70fa13c Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Wed, 21 Sep 2022 14:54:09 -0400 Subject: [PATCH 359/539] Add some tests for check_dummies (#19146) --- .../test_check_copies.py} | 0 tests/repo_utils/test_check_dummies.py | 127 ++++++++++++++++++ utils/check_dummies.py | 5 +- utils/tests_fetcher.py | 2 +- 4 files changed, 131 insertions(+), 3 deletions(-) rename tests/{utils/test_utils_check_copies.py => repo_utils/test_check_copies.py} (100%) create mode 100644 tests/repo_utils/test_check_dummies.py diff --git a/tests/utils/test_utils_check_copies.py b/tests/repo_utils/test_check_copies.py similarity index 100% rename from tests/utils/test_utils_check_copies.py rename to tests/repo_utils/test_check_copies.py diff --git a/tests/repo_utils/test_check_dummies.py b/tests/repo_utils/test_check_dummies.py new file mode 100644 index 00000000000000..8dde0f49443b9c --- /dev/null +++ b/tests/repo_utils/test_check_dummies.py @@ -0,0 +1,127 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import sys +import unittest + + +git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) +sys.path.append(os.path.join(git_repo_path, "utils")) + +import check_dummies +from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 + + +# Align TRANSFORMERS_PATH in check_dummies with the current path +check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers") + +DUMMY_CONSTANT = """ +{0} = None +""" + +DUMMY_CLASS = """ +class {0}(metaclass=DummyObject): + _backends = {1} + + def __init__(self, *args, **kwargs): + requires_backends(self, {1}) +""" + + +DUMMY_FUNCTION = """ +def {0}(*args, **kwargs): + requires_backends({0}, {1}) +""" + + +class CheckDummiesTester(unittest.TestCase): + def test_find_backend(self): + no_backend = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")') + self.assertIsNone(no_backend) + + simple_backend = find_backend(" if not is_tokenizers_available():") + self.assertEqual(simple_backend, "tokenizers") + + backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") + self.assertEqual(backend_with_underscore, "tensorflow_text") + + double_backend = find_backend(" if not (is_sentencepiece_available() and is_tokenizers_available()):") + self.assertEqual(double_backend, "sentencepiece_and_tokenizers") + + double_backend_with_underscore = find_backend( + " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" + ) + self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") + + triple_backend = find_backend( + " if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):" + ) + self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision") + + def test_read_init(self): + objects = read_init() + # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects + self.assertIn("torch", objects) + self.assertIn("tensorflow_text", objects) + self.assertIn("sentencepiece_and_tokenizers", objects) + + # Likewise, we can't assert on the exact content of a key + self.assertIn("BertModel", objects["torch"]) + self.assertIn("TFBertModel", objects["tf"]) + self.assertIn("FlaxBertModel", objects["flax"]) + self.assertIn("BertModel", objects["torch"]) + self.assertIn("TFBertTokenizer", objects["tensorflow_text"]) + self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"]) + + def test_create_dummy_object(self): + dummy_constant = create_dummy_object("CONSTANT", "'torch'") + self.assertEqual(dummy_constant, "\nCONSTANT = None\n") + + dummy_function = create_dummy_object("function", "'torch'") + self.assertEqual( + dummy_function, "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" + ) + + expected_dummy_class = """ +class FakeClass(metaclass=DummyObject): + _backends = 'torch' + + def __init__(self, *args, **kwargs): + requires_backends(self, 'torch') +""" + dummy_class = create_dummy_object("FakeClass", "'torch'") + self.assertEqual(dummy_class, expected_dummy_class) + + def test_create_dummy_files(self): + expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit. +# flake8: noqa +from ..utils import DummyObject, requires_backends + + +CONSTANT = None + + +def function(*args, **kwargs): + requires_backends(function, ["torch"]) + + +class FakeClass(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) +""" + dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]}) + self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file) diff --git a/utils/check_dummies.py b/utils/check_dummies.py index 6e03bfed8c5935..c1a7b2bf68b787 100644 --- a/utils/check_dummies.py +++ b/utils/check_dummies.py @@ -105,9 +105,10 @@ def create_dummy_object(name, backend_name): return DUMMY_CLASS.format(name, backend_name) -def create_dummy_files(): +def create_dummy_files(backend_specific_objects=None): """Create the content of the dummy files.""" - backend_specific_objects = read_init() + if backend_specific_objects is None: + backend_specific_objects = read_init() # For special correspondence backend to module name as used in the function requires_modulename dummy_files = {} diff --git a/utils/tests_fetcher.py b/utils/tests_fetcher.py index 7a645bba12372d..167bf75db1c25d 100644 --- a/utils/tests_fetcher.py +++ b/utils/tests_fetcher.py @@ -437,7 +437,7 @@ def module_to_test_file(module_fname): return ["tests/onnx/test_features.py", "tests/onnx/test_onnx.py", "tests/onnx/test_onnx_v2.py"] # Special case for utils (not the one in src/transformers, the ones at the root of the repo). elif len(splits) > 0 and splits[0] == "utils": - default_test_file = f"tests/utils/test_utils_{module_name}" + default_test_file = f"tests/repo_utils/test_{module_name}" elif len(splits) > 4 and splits[2] == "models": default_test_file = f"tests/models/{splits[3]}/test_{module_name}" elif len(splits) > 2 and splits[2].startswith("generation"): From c7fd28999fb8a03f4839ab5402a7c32997ded0fa Mon Sep 17 00:00:00 2001 From: Nishant Balepur <55101514+nbalepur@users.noreply.github.com> Date: Wed, 21 Sep 2022 13:59:52 -0500 Subject: [PATCH 360/539] Fixed typo in generation_utils.py (#19145) Changed "unfeasable" to "unfeasible" --- src/transformers/generation_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/generation_utils.py b/src/transformers/generation_utils.py index 10f15304f48c4f..3a9b7b24480ec2 100644 --- a/src/transformers/generation_utils.py +++ b/src/transformers/generation_utils.py @@ -1275,7 +1275,7 @@ def generate( if min_length is not None and min_length > max_length: raise ValueError( - f"Unfeasable length constraints: the minimum length ({min_length}) is larger than the maximum " + f"Unfeasible length constraints: the minimum length ({min_length}) is larger than the maximum " f"length ({max_length})" ) if input_ids_seq_length >= max_length: From 126a739058c501ca1a9d09637805494c4c1d1d0a Mon Sep 17 00:00:00 2001 From: DepuMeng Date: Thu, 22 Sep 2022 03:45:04 -0400 Subject: [PATCH 361/539] Add support for conditional detr (#18948) * added conditional_detr files * checked copies * checked copies * fixed style and copies * fixed style and copies * fixed hub * fixed style * Update README.md Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update docs/source/en/_toctree.yml Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update docs/source/en/index.mdx Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/configuration_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/configuration_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update docs/source/en/model_doc/conditional_detr.mdx Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/configuration_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/configuration_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/configuration_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * fixed some doc issue * changed prefix to ConditionalDetr * fixed docs * Update README_ko.md * added spatial_model_name * fixed fix-copies * Update src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * added some copied from * added some copied from * added some copied from * added some copied from * fixed use_pretrained issue * changed post-process * added conditional_detr files * checked copies * checked copies * fixed style and copies * fixed style and copies * fixed hub * fixed style * Update README.md Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update docs/source/en/_toctree.yml Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update docs/source/en/index.mdx Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/configuration_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/configuration_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * fixed some doc issue * Update docs/source/en/model_doc/conditional_detr.mdx Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/configuration_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/configuration_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/configuration_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * changed prefix to ConditionalDetr * fixed docs * Update README_ko.md * added spatial_model_name * fixed fix-copies * Update src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * added some copied from * added some copied from * added some copied from * added some copied from * fixed use_pretrained issue * changed post-process * fix style quality and copies * fix style quality and copies * fix style quality and copies * fix style quality and copies * add more fix-copies * Update src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * fixed some variable names & added more fix-copies * fixed some variable names & added more fix-copies * Update src/transformers/models/conditional_detr/configuration_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * added more copied from * fixed quality * changed pretrained config * added more copied-from and fixed the issue in feature_extraction_auto * added conditional_detr files * checked copies * checked copies * fixed style and copies * fixed style and copies * fixed hub * fixed style * Update README.md Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update docs/source/en/_toctree.yml Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update docs/source/en/index.mdx Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/configuration_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/configuration_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * fixed some doc issue * Update docs/source/en/model_doc/conditional_detr.mdx Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/configuration_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/configuration_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/configuration_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * changed prefix to ConditionalDetr * fixed docs * Update README_ko.md * added spatial_model_name * fixed fix-copies * Update src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * added some copied from * added some copied from * added some copied from * added some copied from * fixed use_pretrained issue * changed post-process * added conditional_detr files * checked copies * fixed style and copies * fixed some doc issue * changed prefix to ConditionalDetr * fixed docs * added spatial_model_name * fixed fix-copies * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * added some copied from * added some copied from * added some copied from * added some copied from * fix style quality and copies * fix style quality and copies * fix style quality and copies * add more fix-copies * fixed some variable names & added more fix-copies * fixed some variable names & added more fix-copies * Update src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/configuration_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * added more copied from * fixed quality * changed pretrained config * added more copied-from and fixed the issue in feature_extraction_auto * fixed style * added conditional_detr files * checked copies * checked copies * fixed style and copies * fixed style and copies * fixed hub * fixed style * Update README.md Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update docs/source/en/_toctree.yml Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update docs/source/en/index.mdx Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/configuration_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/configuration_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * fixed some doc issue * Update docs/source/en/model_doc/conditional_detr.mdx Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/configuration_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/configuration_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/configuration_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * changed prefix to ConditionalDetr * fixed docs * Update README_ko.md * added spatial_model_name * fixed fix-copies * Update src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * added some copied from * added some copied from * added some copied from * added some copied from * fixed use_pretrained issue * changed post-process * added conditional_detr files * checked copies * fixed style and copies * fixed some doc issue * changed prefix to ConditionalDetr * fixed docs * added spatial_model_name * fixed fix-copies * Update src/transformers/models/conditional_detr/modeling_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * added some copied from * added some copied from * added some copied from * added some copied from * fix style quality and copies * fix style quality and copies * fix style quality and copies * add more fix-copies * fixed some variable names & added more fix-copies * fixed some variable names & added more fix-copies * Update src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/conditional_detr/configuration_conditional_detr.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * added more copied from * fixed quality * changed pretrained config * added more copied-from and fixed the issue in feature_extraction_auto * rebased Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Co-authored-by: Depu Meng --- README.md | 1 + README_ko.md | 1 + README_zh-hans.md | 1 + README_zh-hant.md | 1 + docs/source/en/_toctree.yml | 2 + docs/source/en/index.mdx | 2 + docs/source/en/model_doc/conditional_detr.mdx | 53 + docs/source/en/serialization.mdx | 1 + src/transformers/__init__.py | 20 + src/transformers/models/__init__.py | 1 + .../models/auto/configuration_auto.py | 3 + .../models/auto/feature_extraction_auto.py | 2 +- src/transformers/models/auto/modeling_auto.py | 2 + .../models/conditional_detr/__init__.py | 87 + .../configuration_conditional_detr.py | 240 ++ ..._original_pytorch_checkpoint_to_pytorch.py | 325 ++ .../feature_extraction_conditional_detr.py | 949 ++++++ .../modeling_conditional_detr.py | 2626 +++++++++++++++++ .../utils/dummy_timm_and_vision_objects.py | 31 + .../utils/dummy_vision_objects.py | 7 + tests/models/conditional_detr/__init__.py | 0 ...est_feature_extraction_conditional_detr.py | 342 +++ .../test_modeling_conditional_detr.py | 542 ++++ utils/check_repo.py | 3 + 24 files changed, 5241 insertions(+), 1 deletion(-) create mode 100644 docs/source/en/model_doc/conditional_detr.mdx create mode 100644 src/transformers/models/conditional_detr/__init__.py create mode 100644 src/transformers/models/conditional_detr/configuration_conditional_detr.py create mode 100644 src/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py create mode 100644 src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py create mode 100644 src/transformers/models/conditional_detr/modeling_conditional_detr.py create mode 100644 tests/models/conditional_detr/__init__.py create mode 100644 tests/models/conditional_detr/test_feature_extraction_conditional_detr.py create mode 100644 tests/models/conditional_detr/test_modeling_conditional_detr.py diff --git a/README.md b/README.md index 31e4c5af04567d..ec8a0fd2e8b392 100644 --- a/README.md +++ b/README.md @@ -278,6 +278,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting. 1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever. 1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong. +1. **[Conditional DETR](https://huggingface.co/docs/transformers/main/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang. 1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan. 1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie. 1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun. diff --git a/README_ko.md b/README_ko.md index a4ccc124acc2ff..8a2df2fcbc8887 100644 --- a/README_ko.md +++ b/README_ko.md @@ -228,6 +228,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting. 1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever. 1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong. +1. **[Conditional DETR](https://huggingface.co/docs/transformers/main/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang. 1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan. 1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie. 1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun. diff --git a/README_zh-hans.md b/README_zh-hans.md index 34839f54a3a808..88611a5f672bd0 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -252,6 +252,7 @@ conda install -c huggingface transformers 1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (来自 Google Research) 伴随论文 [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) 由 Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting 发布。 1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (来自 OpenAI) 伴随论文 [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) 由 Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever 发布。 1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (来自 Salesforce) 伴随论文 [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) 由 Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong 发布。 +1. **[Conditional DETR](https://huggingface.co/docs/transformers/main/model_doc/conditional_detr)** (来自 Microsoft Research Asia) 伴随论文 [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) 由 Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang 发布。 1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (来自 YituTech) 伴随论文 [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) 由 Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan 发布。 1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (来自 Facebook AI) 伴随论文 [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) 由 Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie 发布。 1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (来自 Tsinghua University) 伴随论文 [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) 由 Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index 9e6a4a0b0ecfe4..b84d3ea8ca974b 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -264,6 +264,7 @@ conda install -c huggingface transformers 1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting. 1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever. 1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong. +1. **[Conditional DETR](https://huggingface.co/docs/transformers/main/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang. 1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan. 1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie. 1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun. diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index c21388b60a6f9f..a4cd1005e3e83f 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -362,6 +362,8 @@ sections: - local: model_doc/beit title: BEiT + - local: model_doc/conditional_detr + title: Conditional DETR - local: model_doc/convnext title: ConvNeXT - local: model_doc/cvt diff --git a/docs/source/en/index.mdx b/docs/source/en/index.mdx index f118359bc57bee..40685a5c2fa8da 100644 --- a/docs/source/en/index.mdx +++ b/docs/source/en/index.mdx @@ -68,6 +68,7 @@ The documentation is organized into five sections: 1. **[CANINE](model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting. 1. **[CLIP](model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever. 1. **[CodeGen](model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong. +1. **[Conditional DETR](model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang. 1. **[ConvBERT](model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan. 1. **[ConvNeXT](model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie. 1. **[CPM](model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun. @@ -215,6 +216,7 @@ Flax), PyTorch, and/or TensorFlow. | CANINE | ✅ | ❌ | ✅ | ❌ | ❌ | | CLIP | ✅ | ✅ | ✅ | ✅ | ✅ | | CodeGen | ✅ | ✅ | ✅ | ❌ | ❌ | +| Conditional DETR | ❌ | ❌ | ✅ | ❌ | ❌ | | ConvBERT | ✅ | ✅ | ✅ | ✅ | ❌ | | ConvNeXT | ❌ | ❌ | ✅ | ✅ | ❌ | | CTRL | ✅ | ❌ | ✅ | ✅ | ❌ | diff --git a/docs/source/en/model_doc/conditional_detr.mdx b/docs/source/en/model_doc/conditional_detr.mdx new file mode 100644 index 00000000000000..d5846cbfee3270 --- /dev/null +++ b/docs/source/en/model_doc/conditional_detr.mdx @@ -0,0 +1,53 @@ + + +# Conditional DETR + +## Overview + +The Conditional DETR model was proposed in [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang. Conditional DETR presents a conditional cross-attention mechanism for fast DETR training. Conditional DETR converges 6.7× to 10× faster than DETR. + +The abstract from the paper is the following: + +*The recently-developed DETR approach applies the transformer encoder and decoder architecture to object detection and achieves promising performance. In this paper, we handle the critical issue, slow training convergence, and present a conditional cross-attention mechanism for fast DETR training. Our approach is motivated by that the cross-attention in DETR relies highly on the content embeddings for localizing the four extremities and predicting the box, which increases the need for high-quality content embeddings and thus the training difficulty. Our approach, named conditional DETR, learns a conditional spatial query from the decoder embedding for decoder multi-head cross-attention. The benefit is that through the conditional spatial query, each cross-attention head is able to attend to a band containing a distinct region, e.g., one object extremity or a region inside the object box. This narrows down the spatial range for localizing the distinct regions for object classification and box regression, thus relaxing the dependence on the content embeddings and easing the training. Empirical results show that conditional DETR converges 6.7× faster for the backbones R50 and R101 and 10× faster for stronger backbones DC5-R50 and DC5-R101. Code is available at https://github.com/Atten4Vis/ConditionalDETR.* + + +This model was contributed by [DepuMeng](https://huggingface.co/DepuMeng). The original code can be found [here](https://github.com/Atten4Vis/ConditionalDETR). + + +## ConditionalDetrConfig + +[[autodoc]] ConditionalDetrConfig + +## ConditionalDetrFeatureExtractor + +[[autodoc]] ConditionalDetrFeatureExtractor + - __call__ + - pad_and_create_pixel_mask + - post_process + - post_process_segmentation + - post_process_panoptic + +## ConditionalDetrModel + +[[autodoc]] ConditionalDetrModel + - forward + +## ConditionalDetrForObjectDetection + +[[autodoc]] ConditionalDetrForObjectDetection + - forward + +## ConditionalDetrForSegmentation + +[[autodoc]] ConditionalDetrForSegmentation + - forward \ No newline at end of file diff --git a/docs/source/en/serialization.mdx b/docs/source/en/serialization.mdx index 74f50c78513ce6..a1577447f7235b 100644 --- a/docs/source/en/serialization.mdx +++ b/docs/source/en/serialization.mdx @@ -57,6 +57,7 @@ Ready-made configurations include the following architectures: - CamemBERT - CLIP - CodeGen +- Conditional DETR - ConvBERT - ConvNeXT - Data2VecText diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 3c3a3a50064162..6abc53c85008e3 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -172,6 +172,7 @@ "CLIPVisionConfig", ], "models.codegen": ["CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP", "CodeGenConfig", "CodeGenTokenizer"], + "models.conditional_detr": ["CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConditionalDetrConfig"], "models.convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertTokenizer"], "models.convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig"], "models.cpm": [], @@ -660,6 +661,7 @@ _import_structure["models.convnext"].append("ConvNextFeatureExtractor") _import_structure["models.deit"].append("DeiTFeatureExtractor") _import_structure["models.detr"].append("DetrFeatureExtractor") + _import_structure["models.conditional_detr"].append("ConditionalDetrFeatureExtractor") _import_structure["models.donut"].append("DonutFeatureExtractor") _import_structure["models.dpt"].append("DPTFeatureExtractor") _import_structure["models.flava"].extend(["FlavaFeatureExtractor", "FlavaProcessor"]) @@ -708,6 +710,15 @@ "DetrPreTrainedModel", ] ) + _import_structure["models.conditional_detr"].extend( + [ + "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", + "ConditionalDetrForObjectDetection", + "ConditionalDetrForSegmentation", + "ConditionalDetrModel", + "ConditionalDetrPreTrainedModel", + ] + ) try: if not is_scatter_available(): @@ -3075,6 +3086,7 @@ CLIPVisionConfig, ) from .models.codegen import CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP, CodeGenConfig, CodeGenTokenizer + from .models.conditional_detr import CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, ConditionalDetrConfig from .models.convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertTokenizer from .models.convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig from .models.ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig, CTRLTokenizer @@ -3498,6 +3510,7 @@ from .image_utils import ImageFeatureExtractionMixin from .models.beit import BeitFeatureExtractor from .models.clip import CLIPFeatureExtractor + from .models.conditional_detr import ConditionalDetrFeatureExtractor from .models.convnext import ConvNextFeatureExtractor from .models.deit import DeiTFeatureExtractor from .models.detr import DetrFeatureExtractor @@ -3527,6 +3540,13 @@ except OptionalDependencyNotAvailable: from .utils.dummy_timm_and_vision_objects import * else: + from .models.conditional_detr import ( + CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, + ConditionalDetrForObjectDetection, + ConditionalDetrForSegmentation, + ConditionalDetrModel, + ConditionalDetrPreTrainedModel, + ) from .models.deformable_detr import ( DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, DeformableDetrForObjectDetection, diff --git a/src/transformers/models/__init__.py b/src/transformers/models/__init__.py index fbdbfd579cb9e2..8a6622a9f35b6f 100644 --- a/src/transformers/models/__init__.py +++ b/src/transformers/models/__init__.py @@ -38,6 +38,7 @@ canine, clip, codegen, + conditional_detr, convbert, convnext, cpm, diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index 1204e6608a768f..dce73cb3903656 100644 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -43,6 +43,7 @@ ("canine", "CanineConfig"), ("clip", "CLIPConfig"), ("codegen", "CodeGenConfig"), + ("conditional_detr", "ConditionalDetrConfig"), ("convbert", "ConvBertConfig"), ("convnext", "ConvNextConfig"), ("ctrl", "CTRLConfig"), @@ -175,6 +176,7 @@ ("canine", "CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("clip", "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("codegen", "CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("conditional_detr", "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("convbert", "CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("convnext", "CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("ctrl", "CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP"), @@ -300,6 +302,7 @@ ("canine", "CANINE"), ("clip", "CLIP"), ("codegen", "CodeGen"), + ("conditional_detr", "Conditional DETR"), ("convbert", "ConvBERT"), ("convnext", "ConvNeXT"), ("cpm", "CPM"), diff --git a/src/transformers/models/auto/feature_extraction_auto.py b/src/transformers/models/auto/feature_extraction_auto.py index 015fd132ef0dc2..cb75f439c233d3 100644 --- a/src/transformers/models/auto/feature_extraction_auto.py +++ b/src/transformers/models/auto/feature_extraction_auto.py @@ -39,6 +39,7 @@ [ ("beit", "BeitFeatureExtractor"), ("clip", "CLIPFeatureExtractor"), + ("conditional_detr", "ConditionalDetrFeatureExtractor"), ("convnext", "ConvNextFeatureExtractor"), ("cvt", "ConvNextFeatureExtractor"), ("data2vec-audio", "Wav2Vec2FeatureExtractor"), @@ -46,7 +47,6 @@ ("deformable_detr", "DetrFeatureExtractor"), ("deit", "DeiTFeatureExtractor"), ("detr", "DetrFeatureExtractor"), - ("detr", "DetrFeatureExtractor"), ("donut", "DonutFeatureExtractor"), ("dpt", "DPTFeatureExtractor"), ("flava", "FlavaFeatureExtractor"), diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index 7f4968d03cdf66..d7c5f1772f13bd 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -42,6 +42,7 @@ ("canine", "CanineModel"), ("clip", "CLIPModel"), ("codegen", "CodeGenModel"), + ("conditional_detr", "ConditionalDetrModel"), ("convbert", "ConvBertModel"), ("convnext", "ConvNextModel"), ("ctrl", "CTRLModel"), @@ -455,6 +456,7 @@ MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES = OrderedDict( [ # Model for Object Detection mapping + ("conditional_detr", "ConditionalDetrForObjectDetection"), ("deformable_detr", "DeformableDetrForObjectDetection"), ("detr", "DetrForObjectDetection"), ("yolos", "YolosForObjectDetection"), diff --git a/src/transformers/models/conditional_detr/__init__.py b/src/transformers/models/conditional_detr/__init__.py new file mode 100644 index 00000000000000..c2f1bdfdbbaae8 --- /dev/null +++ b/src/transformers/models/conditional_detr/__init__.py @@ -0,0 +1,87 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_timm_available, is_vision_available + + +_import_structure = { + "configuration_conditional_detr": [ + "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", + "ConditionalDetrConfig", + "ConditionalDetrOnnxConfig", + ] +} + +try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"] + +try: + if not is_timm_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_conditional_detr"] = [ + "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST", + "ConditionalDetrForObjectDetection", + "ConditionalDetrForSegmentation", + "ConditionalDetrModel", + "ConditionalDetrPreTrainedModel", + ] + + +if TYPE_CHECKING: + from .configuration_conditional_detr import ( + CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, + ConditionalDetrConfig, + ConditionalDetrOnnxConfig, + ) + + try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor + + try: + if not is_timm_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_conditional_detr import ( + CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST, + ConditionalDetrForObjectDetection, + ConditionalDetrForSegmentation, + ConditionalDetrModel, + ConditionalDetrPreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/src/transformers/models/conditional_detr/configuration_conditional_detr.py b/src/transformers/models/conditional_detr/configuration_conditional_detr.py new file mode 100644 index 00000000000000..afa6426bc3738f --- /dev/null +++ b/src/transformers/models/conditional_detr/configuration_conditional_detr.py @@ -0,0 +1,240 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Conditional DETR model configuration""" + +from collections import OrderedDict +from typing import Mapping + +from packaging import version + +from ...configuration_utils import PretrainedConfig +from ...onnx import OnnxConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + +CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "microsoft/conditional-detr-resnet-50": ( + "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json" + ), +} + + +class ConditionalDetrConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`ConditionalDetrModel`]. It is used to instantiate + a Conditional DETR model according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the Conditional DETR + [microsoft/conditional-detr-resnet-50](https://huggingface.co/microsoft/conditional-detr-resnet-50) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + num_channels (`int`, *optional*, defaults to 3): + The number of input channels. + num_queries (`int`, *optional*, defaults to 100): + Number of object queries, i.e. detection slots. This is the maximal number of objects + [`ConditionalDetrModel`] can detect in a single image. For COCO, we recommend 100 queries. + d_model (`int`, *optional*, defaults to 256): + Dimension of the layers. + encoder_layers (`int`, *optional*, defaults to 6): + Number of encoder layers. + decoder_layers (`int`, *optional*, defaults to 6): + Number of decoder layers. + encoder_attention_heads (`int`, *optional*, defaults to 8): + Number of attention heads for each attention layer in the Transformer encoder. + decoder_attention_heads (`int`, *optional*, defaults to 8): + Number of attention heads for each attention layer in the Transformer decoder. + decoder_ffn_dim (`int`, *optional*, defaults to 2048): + Dimension of the "intermediate" (often named feed-forward) layer in decoder. + encoder_ffn_dim (`int`, *optional*, defaults to 2048): + Dimension of the "intermediate" (often named feed-forward) layer in decoder. + activation_function (`str` or `function`, *optional*, defaults to `"relu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + dropout (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + activation_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for activations inside the fully connected layer. + init_std (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + init_xavier_std (`float`, *optional*, defaults to 1): + The scaling factor used for the Xavier initialization gain in the HM Attention map module. + encoder_layerdrop (`float`, *optional*, defaults to 0.0): + The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) + for more details. + decoder_layerdrop (`float`, *optional*, defaults to 0.0): + The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) + for more details. + auxiliary_loss (`bool`, *optional*, defaults to `False`): + Whether auxiliary decoding losses (loss at each decoder layer) are to be used. + position_embedding_type (`str`, *optional*, defaults to `"sine"`): + Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`. + backbone (`str`, *optional*, defaults to `"resnet50"`): + Name of convolutional backbone to use. Supports any convolutional backbone from the timm package. For a + list of all available models, see [this + page](https://rwightman.github.io/pytorch-image-models/#load-a-pretrained-model). + use_pretrained_backbone (`bool`, *optional*, defaults to `True`): + Whether to use pretrained weights for the backbone. + dilation (`bool`, *optional*, defaults to `False`): + Whether to replace stride with dilation in the last convolutional block (DC5). + class_cost (`float`, *optional*, defaults to 1): + Relative weight of the classification error in the Hungarian matching cost. + bbox_cost (`float`, *optional*, defaults to 5): + Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost. + giou_cost (`float`, *optional*, defaults to 2): + Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost. + mask_loss_coefficient (`float`, *optional*, defaults to 1): + Relative weight of the Focal loss in the panoptic segmentation loss. + dice_loss_coefficient (`float`, *optional*, defaults to 1): + Relative weight of the DICE/F-1 loss in the panoptic segmentation loss. + bbox_loss_coefficient (`float`, *optional*, defaults to 5): + Relative weight of the L1 bounding box loss in the object detection loss. + giou_loss_coefficient (`float`, *optional*, defaults to 2): + Relative weight of the generalized IoU loss in the object detection loss. + eos_coefficient (`float`, *optional*, defaults to 0.1): + Relative classification weight of the 'no-object' class in the object detection loss. + + Examples: + + ```python + >>> from transformers import ConditionalDetrModel, ConditionalDetrConfig + + >>> # Initializing a Conditional DETR microsoft/conditional-detr-resnet-50 style configuration + >>> configuration = ConditionalDetrConfig() + + >>> # Initializing a model from the microsoft/conditional-detr-resnet-50 style configuration + >>> model = ConditionalDetrModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + model_type = "conditional_detr" + keys_to_ignore_at_inference = ["past_key_values"] + attribute_map = { + "hidden_size": "d_model", + "num_attention_heads": "encoder_attention_heads", + } + + def __init__( + self, + num_channels=3, + num_queries=300, + max_position_embeddings=1024, + encoder_layers=6, + encoder_ffn_dim=2048, + encoder_attention_heads=8, + decoder_layers=6, + decoder_ffn_dim=2048, + decoder_attention_heads=8, + encoder_layerdrop=0.0, + decoder_layerdrop=0.0, + is_encoder_decoder=True, + activation_function="relu", + d_model=256, + dropout=0.1, + attention_dropout=0.0, + activation_dropout=0.0, + init_std=0.02, + init_xavier_std=1.0, + classifier_dropout=0.0, + scale_embedding=False, + auxiliary_loss=False, + position_embedding_type="sine", + backbone="resnet50", + use_pretrained_backbone=True, + dilation=False, + class_cost=2, + bbox_cost=5, + giou_cost=2, + mask_loss_coefficient=1, + dice_loss_coefficient=1, + cls_loss_coefficient=2, + bbox_loss_coefficient=5, + giou_loss_coefficient=2, + focal_alpha=0.25, + **kwargs + ): + self.num_channels = num_channels + self.num_queries = num_queries + self.max_position_embeddings = max_position_embeddings + self.d_model = d_model + self.encoder_ffn_dim = encoder_ffn_dim + self.encoder_layers = encoder_layers + self.encoder_attention_heads = encoder_attention_heads + self.decoder_ffn_dim = decoder_ffn_dim + self.decoder_layers = decoder_layers + self.decoder_attention_heads = decoder_attention_heads + self.dropout = dropout + self.attention_dropout = attention_dropout + self.activation_dropout = activation_dropout + self.activation_function = activation_function + self.init_std = init_std + self.init_xavier_std = init_xavier_std + self.encoder_layerdrop = encoder_layerdrop + self.decoder_layerdrop = decoder_layerdrop + self.num_hidden_layers = encoder_layers + self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True + self.auxiliary_loss = auxiliary_loss + self.position_embedding_type = position_embedding_type + self.backbone = backbone + self.use_pretrained_backbone = use_pretrained_backbone + self.dilation = dilation + # Hungarian matcher + self.class_cost = class_cost + self.bbox_cost = bbox_cost + self.giou_cost = giou_cost + # Loss coefficients + self.mask_loss_coefficient = mask_loss_coefficient + self.dice_loss_coefficient = dice_loss_coefficient + self.cls_loss_coefficient = cls_loss_coefficient + self.bbox_loss_coefficient = bbox_loss_coefficient + self.giou_loss_coefficient = giou_loss_coefficient + self.focal_alpha = focal_alpha + super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) + + @property + def num_attention_heads(self) -> int: + return self.encoder_attention_heads + + @property + def hidden_size(self) -> int: + return self.d_model + + +class ConditionalDetrOnnxConfig(OnnxConfig): + + torch_onnx_minimum_version = version.parse("1.11") + + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + return OrderedDict( + [ + ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), + ("pixel_mask", {0: "batch"}), + ] + ) + + @property + def atol_for_validation(self) -> float: + return 1e-5 + + @property + def default_onnx_opset(self) -> int: + return 12 diff --git a/src/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py new file mode 100644 index 00000000000000..904530c44c2272 --- /dev/null +++ b/src/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py @@ -0,0 +1,325 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Convert Conditional DETR checkpoints.""" + + +import argparse +import json +from collections import OrderedDict +from pathlib import Path + +import torch +from PIL import Image + +import requests +from huggingface_hub import hf_hub_download +from transformers import ( + ConditionalDetrConfig, + ConditionalDetrFeatureExtractor, + ConditionalDetrForObjectDetection, + ConditionalDetrForSegmentation, +) +from transformers.utils import logging + + +logging.set_verbosity_info() +logger = logging.get_logger(__name__) + +# here we list all keys to be renamed (original name on the left, our name on the right) +rename_keys = [] +for i in range(6): + # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms + rename_keys.append( + (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight") + ) + rename_keys.append( + (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias") + ) + rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight")) + rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias")) + rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight")) + rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias")) + rename_keys.append( + (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight") + ) + rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias")) + rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight")) + rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias")) + # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms + rename_keys.append( + (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight") + ) + rename_keys.append( + (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias") + ) + rename_keys.append( + ( + f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight", + f"decoder.layers.{i}.encoder_attn.out_proj.weight", + ) + ) + rename_keys.append( + ( + f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias", + f"decoder.layers.{i}.encoder_attn.out_proj.bias", + ) + ) + rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight")) + rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias")) + rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight")) + rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias")) + rename_keys.append( + (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight") + ) + rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias")) + rename_keys.append( + (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight") + ) + rename_keys.append( + (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias") + ) + rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight")) + rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias")) + + # q, k, v projections in self/cross-attention in decoder for conditional DETR + rename_keys.append( + (f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight") + ) + rename_keys.append( + (f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight") + ) + rename_keys.append( + (f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight") + ) + rename_keys.append( + (f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight") + ) + rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight")) + rename_keys.append( + (f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight") + ) + # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) + rename_keys.append( + (f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight") + ) + rename_keys.append( + (f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight") + ) + rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight")) + rename_keys.append( + (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight") + ) + + rename_keys.append( + (f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias") + ) + rename_keys.append( + (f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias") + ) + rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias")) + rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias")) + rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias")) + rename_keys.append( + (f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias") + ) + # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) + rename_keys.append( + (f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias") + ) + rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias")) + rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias")) + rename_keys.append( + (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias") + ) + +# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads +# for conditional DETR, also convert reference point head and query scale MLP +rename_keys.extend( + [ + ("input_proj.weight", "input_projection.weight"), + ("input_proj.bias", "input_projection.bias"), + ("query_embed.weight", "query_position_embeddings.weight"), + ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), + ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), + ("class_embed.weight", "class_labels_classifier.weight"), + ("class_embed.bias", "class_labels_classifier.bias"), + ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), + ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), + ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), + ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), + ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), + ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), + ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"), + ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"), + ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"), + ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"), + ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"), + ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"), + ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"), + ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"), + ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"), + ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"), + ] +) + + +def rename_key(state_dict, old, new): + val = state_dict.pop(old) + state_dict[new] = val + + +def rename_backbone_keys(state_dict): + new_state_dict = OrderedDict() + for key, value in state_dict.items(): + if "backbone.0.body" in key: + new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model") + new_state_dict[new_key] = value + else: + new_state_dict[key] = value + + return new_state_dict + + +def read_in_q_k_v(state_dict, is_panoptic=False): + prefix = "" + if is_panoptic: + prefix = "conditional_detr." + + # first: transformer encoder + for i in range(6): + # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) + in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight") + in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias") + # next, add query, keys and values (in that order) to the state dict + state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :] + state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256] + state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :] + state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512] + state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :] + state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:] + + +# We will verify our results on an image of cute cats +def prepare_img(): + url = "http://images.cocodataset.org/val2017/000000039769.jpg" + im = Image.open(requests.get(url, stream=True).raw) + + return im + + +@torch.no_grad() +def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path): + """ + Copy/paste/tweak model's weights to our CONDITIONAL_DETR structure. + """ + + # load default config + config = ConditionalDetrConfig() + # set backbone and dilation attributes + if "resnet101" in model_name: + config.backbone = "resnet101" + if "dc5" in model_name: + config.dilation = True + is_panoptic = "panoptic" in model_name + if is_panoptic: + config.num_labels = 250 + else: + config.num_labels = 91 + repo_id = "datasets/huggingface/label-files" + filename = "coco-detection-id2label.json" + id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = {int(k): v for k, v in id2label.items()} + config.id2label = id2label + config.label2id = {v: k for k, v in id2label.items()} + + # load feature extractor + format = "coco_panoptic" if is_panoptic else "coco_detection" + feature_extractor = ConditionalDetrFeatureExtractor(format=format) + + # prepare image + img = prepare_img() + encoding = feature_extractor(images=img, return_tensors="pt") + pixel_values = encoding["pixel_values"] + + logger.info(f"Converting model {model_name}...") + + # load original model from torch hub + conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval() + state_dict = conditional_detr.state_dict() + # rename keys + for src, dest in rename_keys: + if is_panoptic: + src = "conditional_detr." + src + rename_key(state_dict, src, dest) + state_dict = rename_backbone_keys(state_dict) + # query, key and value matrices need special treatment + read_in_q_k_v(state_dict, is_panoptic=is_panoptic) + # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them + prefix = "conditional_detr.model." if is_panoptic else "model." + for key in state_dict.copy().keys(): + if is_panoptic: + if ( + key.startswith("conditional_detr") + and not key.startswith("class_labels_classifier") + and not key.startswith("bbox_predictor") + ): + val = state_dict.pop(key) + state_dict["conditional_detr.model" + key[4:]] = val + elif "class_labels_classifier" in key or "bbox_predictor" in key: + val = state_dict.pop(key) + state_dict["conditional_detr." + key] = val + elif key.startswith("bbox_attention") or key.startswith("mask_head"): + continue + else: + val = state_dict.pop(key) + state_dict[prefix + key] = val + else: + if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"): + val = state_dict.pop(key) + state_dict[prefix + key] = val + # finally, create HuggingFace model and load state dict + model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config) + model.load_state_dict(state_dict) + model.eval() + model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model") + # verify our conversion + original_outputs = conditional_detr(pixel_values) + outputs = model(pixel_values) + assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4) + assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4) + if is_panoptic: + assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4) + + # Save model and feature extractor + logger.info(f"Saving PyTorch model and feature extractor to {pytorch_dump_folder_path}...") + Path(pytorch_dump_folder_path).mkdir(exist_ok=True) + model.save_pretrained(pytorch_dump_folder_path) + feature_extractor.save_pretrained(pytorch_dump_folder_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + + parser.add_argument( + "--model_name", + default="conditional_detr_resnet50", + type=str, + help="Name of the CONDITIONAL_DETR model you'd like to convert.", + ) + parser.add_argument( + "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." + ) + args = parser.parse_args() + convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path) diff --git a/src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py b/src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py new file mode 100644 index 00000000000000..96b9fa69db04e3 --- /dev/null +++ b/src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py @@ -0,0 +1,949 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Feature extractor class for Conditional DETR.""" + +import io +import pathlib +from collections import defaultdict +from typing import Dict, List, Optional, Union + +import numpy as np +from PIL import Image + +from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin +from ...image_utils import ImageFeatureExtractionMixin, is_torch_tensor +from ...utils import TensorType, is_torch_available, logging + + +if is_torch_available(): + import torch + from torch import nn + +logger = logging.get_logger(__name__) + + +ImageInput = Union[Image.Image, np.ndarray, "torch.Tensor", List[Image.Image], List[np.ndarray], List["torch.Tensor"]] + + +# Copied from transformers.models.detr.feature_extraction_detr.center_to_corners_format +def center_to_corners_format(x): + """ + Converts a PyTorch tensor of bounding boxes of center format (center_x, center_y, width, height) to corners format + (x_0, y_0, x_1, y_1). + """ + x_c, y_c, w, h = x.unbind(-1) + b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)] + return torch.stack(b, dim=-1) + + +# Copied from transformers.models.detr.feature_extraction_detr.corners_to_center_format +def corners_to_center_format(x): + """ + Converts a NumPy array of bounding boxes of shape (number of bounding boxes, 4) of corners format (x_0, y_0, x_1, + y_1) to center format (center_x, center_y, width, height). + """ + x_transposed = x.T + x0, y0, x1, y1 = x_transposed[0], x_transposed[1], x_transposed[2], x_transposed[3] + b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)] + return np.stack(b, axis=-1) + + +# Copied from transformers.models.detr.feature_extraction_detr.masks_to_boxes +def masks_to_boxes(masks): + """ + Compute the bounding boxes around the provided panoptic segmentation masks. + + The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. + + Returns a [N, 4] tensor, with the boxes in corner (xyxy) format. + """ + if masks.size == 0: + return np.zeros((0, 4)) + + h, w = masks.shape[-2:] + + y = np.arange(0, h, dtype=np.float32) + x = np.arange(0, w, dtype=np.float32) + # see https://github.com/pytorch/pytorch/issues/50276 + y, x = np.meshgrid(y, x, indexing="ij") + + x_mask = masks * np.expand_dims(x, axis=0) + x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) + x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) + x_min = x.filled(fill_value=1e8) + x_min = x_min.reshape(x_min.shape[0], -1).min(-1) + + y_mask = masks * np.expand_dims(y, axis=0) + y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) + y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) + y_min = y.filled(fill_value=1e8) + y_min = y_min.reshape(y_min.shape[0], -1).min(-1) + + return np.stack([x_min, y_min, x_max, y_max], 1) + + +# Copied from transformers.models.detr.feature_extraction_detr.rgb_to_id +def rgb_to_id(color): + if isinstance(color, np.ndarray) and len(color.shape) == 3: + if color.dtype == np.uint8: + color = color.astype(np.int32) + return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2] + return int(color[0] + 256 * color[1] + 256 * 256 * color[2]) + + +# Copied from transformers.models.detr.feature_extraction_detr.id_to_rgb +def id_to_rgb(id_map): + if isinstance(id_map, np.ndarray): + id_map_copy = id_map.copy() + rgb_shape = tuple(list(id_map.shape) + [3]) + rgb_map = np.zeros(rgb_shape, dtype=np.uint8) + for i in range(3): + rgb_map[..., i] = id_map_copy % 256 + id_map_copy //= 256 + return rgb_map + color = [] + for _ in range(3): + color.append(id_map % 256) + id_map //= 256 + return color + + +class ConditionalDetrFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin): + r""" + Constructs a Conditional DETR feature extractor. + + This feature extractor inherits from [`FeatureExtractionMixin`] which contains most of the main methods. Users + should refer to this superclass for more information regarding those methods. + + + Args: + format (`str`, *optional*, defaults to `"coco_detection"`): + Data format of the annotations. One of "coco_detection" or "coco_panoptic". + do_resize (`bool`, *optional*, defaults to `True`): + Whether to resize the input to a certain `size`. + size (`int`, *optional*, defaults to 800): + Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a + sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of + the image will be matched to this number. i.e, if `height > width`, then image will be rescaled to `(size * + height / width, size)`. + max_size (`int`, *optional*, defaults to `1333`): + The largest size an image dimension can have (otherwise it's capped). Only has an effect if `do_resize` is + set to `True`. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether or not to normalize the input with mean and standard deviation. + image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`): + The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean. + image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`): + The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the + ImageNet std. + """ + + model_input_names = ["pixel_values", "pixel_mask"] + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.__init__ + def __init__( + self, + format="coco_detection", + do_resize=True, + size=800, + max_size=1333, + do_normalize=True, + image_mean=None, + image_std=None, + **kwargs + ): + super().__init__(**kwargs) + self.format = self._is_valid_format(format) + self.do_resize = do_resize + self.size = size + self.max_size = max_size + self.do_normalize = do_normalize + self.image_mean = image_mean if image_mean is not None else [0.485, 0.456, 0.406] # ImageNet mean + self.image_std = image_std if image_std is not None else [0.229, 0.224, 0.225] # ImageNet std + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._is_valid_format + def _is_valid_format(self, format): + if format not in ["coco_detection", "coco_panoptic"]: + raise ValueError(f"Format {format} not supported") + return format + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare + def prepare(self, image, target, return_segmentation_masks=False, masks_path=None): + if self.format == "coco_detection": + image, target = self.prepare_coco_detection(image, target, return_segmentation_masks) + return image, target + elif self.format == "coco_panoptic": + image, target = self.prepare_coco_panoptic(image, target, masks_path) + return image, target + else: + raise ValueError(f"Format {self.format} not supported") + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.convert_coco_poly_to_mask + def convert_coco_poly_to_mask(self, segmentations, height, width): + + try: + from pycocotools import mask as coco_mask + except ImportError: + raise ImportError("Pycocotools is not installed in your environment.") + + masks = [] + for polygons in segmentations: + rles = coco_mask.frPyObjects(polygons, height, width) + mask = coco_mask.decode(rles) + if len(mask.shape) < 3: + mask = mask[..., None] + mask = np.asarray(mask, dtype=np.uint8) + mask = np.any(mask, axis=2) + masks.append(mask) + if masks: + masks = np.stack(masks, axis=0) + else: + masks = np.zeros((0, height, width), dtype=np.uint8) + + return masks + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare_coco_detection with DETR->ConditionalDETR + def prepare_coco_detection(self, image, target, return_segmentation_masks=False): + """ + Convert the target in COCO format into the format expected by ConditionalDETR. + """ + w, h = image.size + + image_id = target["image_id"] + image_id = np.asarray([image_id], dtype=np.int64) + + # get all COCO annotations for the given image + anno = target["annotations"] + + anno = [obj for obj in anno if "iscrowd" not in obj or obj["iscrowd"] == 0] + + boxes = [obj["bbox"] for obj in anno] + # guard against no boxes via resizing + boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4) + boxes[:, 2:] += boxes[:, :2] + boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=w) + boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=h) + + classes = [obj["category_id"] for obj in anno] + classes = np.asarray(classes, dtype=np.int64) + + if return_segmentation_masks: + segmentations = [obj["segmentation"] for obj in anno] + masks = self.convert_coco_poly_to_mask(segmentations, h, w) + + keypoints = None + if anno and "keypoints" in anno[0]: + keypoints = [obj["keypoints"] for obj in anno] + keypoints = np.asarray(keypoints, dtype=np.float32) + num_keypoints = keypoints.shape[0] + if num_keypoints: + keypoints = keypoints.reshape((-1, 3)) + + keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) + boxes = boxes[keep] + classes = classes[keep] + if return_segmentation_masks: + masks = masks[keep] + if keypoints is not None: + keypoints = keypoints[keep] + + target = {} + target["boxes"] = boxes + target["class_labels"] = classes + if return_segmentation_masks: + target["masks"] = masks + target["image_id"] = image_id + if keypoints is not None: + target["keypoints"] = keypoints + + # for conversion to coco api + area = np.asarray([obj["area"] for obj in anno], dtype=np.float32) + iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno], dtype=np.int64) + target["area"] = area[keep] + target["iscrowd"] = iscrowd[keep] + + target["orig_size"] = np.asarray([int(h), int(w)], dtype=np.int64) + target["size"] = np.asarray([int(h), int(w)], dtype=np.int64) + + return image, target + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare_coco_panoptic + def prepare_coco_panoptic(self, image, target, masks_path, return_masks=True): + w, h = image.size + ann_info = target.copy() + ann_path = pathlib.Path(masks_path) / ann_info["file_name"] + + if "segments_info" in ann_info: + masks = np.asarray(Image.open(ann_path), dtype=np.uint32) + masks = rgb_to_id(masks) + + ids = np.array([ann["id"] for ann in ann_info["segments_info"]]) + masks = masks == ids[:, None, None] + masks = np.asarray(masks, dtype=np.uint8) + + labels = np.asarray([ann["category_id"] for ann in ann_info["segments_info"]], dtype=np.int64) + + target = {} + target["image_id"] = np.asarray( + [ann_info["image_id"] if "image_id" in ann_info else ann_info["id"]], dtype=np.int64 + ) + if return_masks: + target["masks"] = masks + target["class_labels"] = labels + + target["boxes"] = masks_to_boxes(masks) + + target["size"] = np.asarray([int(h), int(w)], dtype=np.int64) + target["orig_size"] = np.asarray([int(h), int(w)], dtype=np.int64) + if "segments_info" in ann_info: + target["iscrowd"] = np.asarray([ann["iscrowd"] for ann in ann_info["segments_info"]], dtype=np.int64) + target["area"] = np.asarray([ann["area"] for ann in ann_info["segments_info"]], dtype=np.float32) + + return image, target + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._resize + def _resize(self, image, size, target=None, max_size=None): + """ + Resize the image to the given size. Size can be min_size (scalar) or (w, h) tuple. If size is an int, smaller + edge of the image will be matched to this number. + + If given, also resize the target accordingly. + """ + if not isinstance(image, Image.Image): + image = self.to_pil_image(image) + + def get_size_with_aspect_ratio(image_size, size, max_size=None): + w, h = image_size + if max_size is not None: + min_original_size = float(min((w, h))) + max_original_size = float(max((w, h))) + if max_original_size / min_original_size * size > max_size: + size = int(round(max_size * min_original_size / max_original_size)) + + if (w <= h and w == size) or (h <= w and h == size): + return (h, w) + + if w < h: + ow = size + oh = int(size * h / w) + else: + oh = size + ow = int(size * w / h) + + return (oh, ow) + + def get_size(image_size, size, max_size=None): + if isinstance(size, (list, tuple)): + return size + else: + # size returned must be (w, h) since we use PIL to resize images + # so we revert the tuple + return get_size_with_aspect_ratio(image_size, size, max_size)[::-1] + + size = get_size(image.size, size, max_size) + rescaled_image = self.resize(image, size=size) + + if target is None: + return rescaled_image, None + + ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)) + ratio_width, ratio_height = ratios + + target = target.copy() + if "boxes" in target: + boxes = target["boxes"] + scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32) + target["boxes"] = scaled_boxes + + if "area" in target: + area = target["area"] + scaled_area = area * (ratio_width * ratio_height) + target["area"] = scaled_area + + w, h = size + target["size"] = np.asarray([h, w], dtype=np.int64) + + if "masks" in target: + # use PyTorch as current workaround + # TODO replace by self.resize + masks = torch.from_numpy(target["masks"][:, None]).float() + interpolated_masks = nn.functional.interpolate(masks, size=(h, w), mode="nearest")[:, 0] > 0.5 + target["masks"] = interpolated_masks.numpy() + + return rescaled_image, target + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._normalize + def _normalize(self, image, mean, std, target=None): + """ + Normalize the image with a certain mean and std. + + If given, also normalize the target bounding boxes based on the size of the image. + """ + + image = self.normalize(image, mean=mean, std=std) + if target is None: + return image, None + + target = target.copy() + h, w = image.shape[-2:] + + if "boxes" in target: + boxes = target["boxes"] + boxes = corners_to_center_format(boxes) + boxes = boxes / np.asarray([w, h, w, h], dtype=np.float32) + target["boxes"] = boxes + + return image, target + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.__call__ with Detr->ConditionalDetr,DETR->ConditionalDETR + def __call__( + self, + images: ImageInput, + annotations: Union[List[Dict], List[List[Dict]]] = None, + return_segmentation_masks: Optional[bool] = False, + masks_path: Optional[pathlib.Path] = None, + pad_and_return_pixel_mask: Optional[bool] = True, + return_tensors: Optional[Union[str, TensorType]] = None, + **kwargs, + ) -> BatchFeature: + """ + Main method to prepare for the model one or several image(s) and optional annotations. Images are by default + padded up to the largest image in a batch, and a pixel mask is created that indicates which pixels are + real/which are padding. + + + + NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass + PIL images. + + + + Args: + images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): + The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch + tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a + number of channels, H and W are image height and width. + + annotations (`Dict`, `List[Dict]`, *optional*): + The corresponding annotations in COCO format. + + In case [`ConditionalDetrFeatureExtractor`] was initialized with `format = "coco_detection"`, the + annotations for each image should have the following format: {'image_id': int, 'annotations': + [annotation]}, with the annotations being a list of COCO object annotations. + + In case [`ConditionalDetrFeatureExtractor`] was initialized with `format = "coco_panoptic"`, the + annotations for each image should have the following format: {'image_id': int, 'file_name': str, + 'segments_info': [segment_info]} with segments_info being a list of COCO panoptic annotations. + + return_segmentation_masks (`Dict`, `List[Dict]`, *optional*, defaults to `False`): + Whether to also include instance segmentation masks as part of the labels in case `format = + "coco_detection"`. + + masks_path (`pathlib.Path`, *optional*): + Path to the directory containing the PNG files that store the class-agnostic image segmentations. Only + relevant in case [`ConditionalDetrFeatureExtractor`] was initialized with `format = "coco_panoptic"`. + + pad_and_return_pixel_mask (`bool`, *optional*, defaults to `True`): + Whether or not to pad images up to the largest image in a batch and create a pixel mask. + + If left to the default, will return a pixel mask that is: + + - 1 for pixels that are real (i.e. **not masked**), + - 0 for pixels that are padding (i.e. **masked**). + + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` + objects. + + Returns: + [`BatchFeature`]: A [`BatchFeature`] with the following fields: + + - **pixel_values** -- Pixel values to be fed to a model. + - **pixel_mask** -- Pixel mask to be fed to a model (when `pad_and_return_pixel_mask=True` or if + *"pixel_mask"* is in `self.model_input_names`). + - **labels** -- Optional labels to be fed to a model (when `annotations` are provided) + """ + # Input type checking for clearer error + + valid_images = False + valid_annotations = False + valid_masks_path = False + + # Check that images has a valid type + if isinstance(images, (Image.Image, np.ndarray)) or is_torch_tensor(images): + valid_images = True + elif isinstance(images, (list, tuple)): + if len(images) == 0 or isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]): + valid_images = True + + if not valid_images: + raise ValueError( + "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), " + "`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)." + ) + + is_batched = bool( + isinstance(images, (list, tuple)) + and (isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0])) + ) + + # Check that annotations has a valid type + if annotations is not None: + if not is_batched: + if self.format == "coco_detection": + if isinstance(annotations, dict) and "image_id" in annotations and "annotations" in annotations: + if isinstance(annotations["annotations"], (list, tuple)): + # an image can have no annotations + if len(annotations["annotations"]) == 0 or isinstance(annotations["annotations"][0], dict): + valid_annotations = True + elif self.format == "coco_panoptic": + if isinstance(annotations, dict) and "image_id" in annotations and "segments_info" in annotations: + if isinstance(annotations["segments_info"], (list, tuple)): + # an image can have no segments (?) + if len(annotations["segments_info"]) == 0 or isinstance( + annotations["segments_info"][0], dict + ): + valid_annotations = True + else: + if isinstance(annotations, (list, tuple)): + if len(images) != len(annotations): + raise ValueError("There must be as many annotations as there are images") + if isinstance(annotations[0], Dict): + if self.format == "coco_detection": + if isinstance(annotations[0]["annotations"], (list, tuple)): + valid_annotations = True + elif self.format == "coco_panoptic": + if isinstance(annotations[0]["segments_info"], (list, tuple)): + valid_annotations = True + + if not valid_annotations: + raise ValueError( + """ + Annotations must of type `Dict` (single image) or `List[Dict]` (batch of images). In case of object + detection, each dictionary should contain the keys 'image_id' and 'annotations', with the latter + being a list of annotations in COCO format. In case of panoptic segmentation, each dictionary + should contain the keys 'file_name', 'image_id' and 'segments_info', with the latter being a list + of annotations in COCO format. + """ + ) + + # Check that masks_path has a valid type + if masks_path is not None: + if self.format == "coco_panoptic": + if isinstance(masks_path, pathlib.Path): + valid_masks_path = True + if not valid_masks_path: + raise ValueError( + "The path to the directory containing the mask PNG files should be provided as a" + " `pathlib.Path` object." + ) + + if not is_batched: + images = [images] + if annotations is not None: + annotations = [annotations] + + # prepare (COCO annotations as a list of Dict -> ConditionalDETR target as a single Dict per image) + if annotations is not None: + for idx, (image, target) in enumerate(zip(images, annotations)): + if not isinstance(image, Image.Image): + image = self.to_pil_image(image) + image, target = self.prepare(image, target, return_segmentation_masks, masks_path) + images[idx] = image + annotations[idx] = target + + # transformations (resizing + normalization) + if self.do_resize and self.size is not None: + if annotations is not None: + for idx, (image, target) in enumerate(zip(images, annotations)): + image, target = self._resize(image=image, target=target, size=self.size, max_size=self.max_size) + images[idx] = image + annotations[idx] = target + else: + for idx, image in enumerate(images): + images[idx] = self._resize(image=image, target=None, size=self.size, max_size=self.max_size)[0] + + if self.do_normalize: + if annotations is not None: + for idx, (image, target) in enumerate(zip(images, annotations)): + image, target = self._normalize( + image=image, mean=self.image_mean, std=self.image_std, target=target + ) + images[idx] = image + annotations[idx] = target + else: + images = [ + self._normalize(image=image, mean=self.image_mean, std=self.image_std)[0] for image in images + ] + + if pad_and_return_pixel_mask: + # pad images up to largest image in batch and create pixel_mask + max_size = self._max_by_axis([list(image.shape) for image in images]) + c, h, w = max_size + padded_images = [] + pixel_mask = [] + for image in images: + # create padded image + padded_image = np.zeros((c, h, w), dtype=np.float32) + padded_image[: image.shape[0], : image.shape[1], : image.shape[2]] = np.copy(image) + padded_images.append(padded_image) + # create pixel mask + mask = np.zeros((h, w), dtype=np.int64) + mask[: image.shape[1], : image.shape[2]] = True + pixel_mask.append(mask) + images = padded_images + + # return as BatchFeature + data = {} + data["pixel_values"] = images + if pad_and_return_pixel_mask: + data["pixel_mask"] = pixel_mask + encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) + + if annotations is not None: + # Convert to TensorType + tensor_type = return_tensors + if not isinstance(tensor_type, TensorType): + tensor_type = TensorType(tensor_type) + + if not tensor_type == TensorType.PYTORCH: + raise ValueError("Only PyTorch is supported for the moment.") + else: + if not is_torch_available(): + raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.") + + encoded_inputs["labels"] = [ + {k: torch.from_numpy(v) for k, v in target.items()} for target in annotations + ] + + return encoded_inputs + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._max_by_axis + def _max_by_axis(self, the_list): + # type: (List[List[int]]) -> List[int] + maxes = the_list[0] + for sublist in the_list[1:]: + for index, item in enumerate(sublist): + maxes[index] = max(maxes[index], item) + return maxes + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.pad_and_create_pixel_mask + def pad_and_create_pixel_mask( + self, pixel_values_list: List["torch.Tensor"], return_tensors: Optional[Union[str, TensorType]] = None + ): + """ + Pad images up to the largest image in a batch and create a corresponding `pixel_mask`. + + Args: + pixel_values_list (`List[torch.Tensor]`): + List of images (pixel values) to be padded. Each image should be a tensor of shape (C, H, W). + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` + objects. + + Returns: + [`BatchFeature`]: A [`BatchFeature`] with the following fields: + + - **pixel_values** -- Pixel values to be fed to a model. + - **pixel_mask** -- Pixel mask to be fed to a model (when `pad_and_return_pixel_mask=True` or if + *"pixel_mask"* is in `self.model_input_names`). + + """ + + max_size = self._max_by_axis([list(image.shape) for image in pixel_values_list]) + c, h, w = max_size + padded_images = [] + pixel_mask = [] + for image in pixel_values_list: + # create padded image + padded_image = np.zeros((c, h, w), dtype=np.float32) + padded_image[: image.shape[0], : image.shape[1], : image.shape[2]] = np.copy(image) + padded_images.append(padded_image) + # create pixel mask + mask = np.zeros((h, w), dtype=np.int64) + mask[: image.shape[1], : image.shape[2]] = True + pixel_mask.append(mask) + + # return as BatchFeature + data = {"pixel_values": padded_images, "pixel_mask": pixel_mask} + encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) + + return encoded_inputs + + # POSTPROCESSING METHODS + # inspired by https://github.com/Atten4Vis/conditionalDETR/blob/master/models/conditional_detr.py#L258 + def post_process(self, outputs, target_sizes): + """ + Converts the output of [`ConditionalDetrForObjectDetection`] into the format expected by the COCO api. Only + supports PyTorch. + + Args: + outputs ([`ConditionalDetrObjectDetectionOutput`]): + Raw outputs of the model. + target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): + Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original + image size (before any data augmentation). For visualization, this should be the image size after data + augment, but before padding. + + Returns: + `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image + in the batch as predicted by the model. + """ + out_logits, out_bbox = outputs.logits, outputs.pred_boxes + + if len(out_logits) != len(target_sizes): + raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits") + if target_sizes.shape[1] != 2: + raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch") + + prob = out_logits.sigmoid() + topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 300, dim=1) + scores = topk_values + topk_boxes = topk_indexes // out_logits.shape[2] + labels = topk_indexes % out_logits.shape[2] + boxes = center_to_corners_format(out_bbox) + boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) + + # and from relative [0, 1] to absolute [0, height] coordinates + img_h, img_w = target_sizes.unbind(1) + scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) + boxes = boxes * scale_fct[:, None, :] + + results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)] + + return results + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.post_process_segmentation with Detr->ConditionalDetr + def post_process_segmentation(self, outputs, target_sizes, threshold=0.9, mask_threshold=0.5): + """ + Converts the output of [`ConditionalDetrForSegmentation`] into image segmentation predictions. Only supports + PyTorch. + + Parameters: + outputs ([`ConditionalDetrSegmentationOutput`]): + Raw outputs of the model. + target_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`): + Torch Tensor (or list) corresponding to the requested final size (h, w) of each prediction. + threshold (`float`, *optional*, defaults to 0.9): + Threshold to use to filter out queries. + mask_threshold (`float`, *optional*, defaults to 0.5): + Threshold to use when turning the predicted masks into binary values. + + Returns: + `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, and masks for an image + in the batch as predicted by the model. + """ + out_logits, raw_masks = outputs.logits, outputs.pred_masks + preds = [] + + def to_tuple(tup): + if isinstance(tup, tuple): + return tup + return tuple(tup.cpu().tolist()) + + for cur_logits, cur_masks, size in zip(out_logits, raw_masks, target_sizes): + # we filter empty queries and detection below threshold + scores, labels = cur_logits.softmax(-1).max(-1) + keep = labels.ne(outputs.logits.shape[-1] - 1) & (scores > threshold) + cur_scores, cur_classes = cur_logits.softmax(-1).max(-1) + cur_scores = cur_scores[keep] + cur_classes = cur_classes[keep] + cur_masks = cur_masks[keep] + cur_masks = nn.functional.interpolate(cur_masks[:, None], to_tuple(size), mode="bilinear").squeeze(1) + cur_masks = (cur_masks.sigmoid() > mask_threshold) * 1 + + predictions = {"scores": cur_scores, "labels": cur_classes, "masks": cur_masks} + preds.append(predictions) + return preds + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.post_process_instance with Detr->ConditionalDetr + def post_process_instance(self, results, outputs, orig_target_sizes, max_target_sizes, threshold=0.5): + """ + Converts the output of [`ConditionalDetrForSegmentation`] into actual instance segmentation predictions. Only + supports PyTorch. + + Args: + results (`List[Dict]`): + Results list obtained by [`~ConditionalDetrFeatureExtractor.post_process`], to which "masks" results + will be added. + outputs ([`ConditionalDetrSegmentationOutput`]): + Raw outputs of the model. + orig_target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): + Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original + image size (before any data augmentation). + max_target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): + Tensor containing the maximum size (h, w) of each image of the batch. For evaluation, this must be the + original image size (before any data augmentation). + threshold (`float`, *optional*, defaults to 0.5): + Threshold to use when turning the predicted masks into binary values. + + Returns: + `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, boxes and masks for an + image in the batch as predicted by the model. + """ + + if len(orig_target_sizes) != len(max_target_sizes): + raise ValueError("Make sure to pass in as many orig_target_sizes as max_target_sizes") + max_h, max_w = max_target_sizes.max(0)[0].tolist() + outputs_masks = outputs.pred_masks.squeeze(2) + outputs_masks = nn.functional.interpolate( + outputs_masks, size=(max_h, max_w), mode="bilinear", align_corners=False + ) + outputs_masks = (outputs_masks.sigmoid() > threshold).cpu() + + for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)): + img_h, img_w = t[0], t[1] + results[i]["masks"] = cur_mask[:, :img_h, :img_w].unsqueeze(1) + results[i]["masks"] = nn.functional.interpolate( + results[i]["masks"].float(), size=tuple(tt.tolist()), mode="nearest" + ).byte() + + return results + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.post_process_panoptic with Detr->ConditionalDetr + def post_process_panoptic(self, outputs, processed_sizes, target_sizes=None, is_thing_map=None, threshold=0.85): + """ + Converts the output of [`ConditionalDetrForSegmentation`] into actual panoptic predictions. Only supports + PyTorch. + + Parameters: + outputs ([`ConditionalDetrSegmentationOutput`]): + Raw outputs of the model. + processed_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`): + Torch Tensor (or list) containing the size (h, w) of each image of the batch, i.e. the size after data + augmentation but before batching. + target_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`, *optional*): + Torch Tensor (or list) corresponding to the requested final size (h, w) of each prediction. If left to + None, it will default to the `processed_sizes`. + is_thing_map (`torch.Tensor` of shape `(batch_size, 2)`, *optional*): + Dictionary mapping class indices to either True or False, depending on whether or not they are a thing. + If not set, defaults to the `is_thing_map` of COCO panoptic. + threshold (`float`, *optional*, defaults to 0.85): + Threshold to use to filter out queries. + + Returns: + `List[Dict]`: A list of dictionaries, each dictionary containing a PNG string and segments_info values for + an image in the batch as predicted by the model. + """ + if target_sizes is None: + target_sizes = processed_sizes + if len(processed_sizes) != len(target_sizes): + raise ValueError("Make sure to pass in as many processed_sizes as target_sizes") + + if is_thing_map is None: + # default to is_thing_map of COCO panoptic + is_thing_map = {i: i <= 90 for i in range(201)} + + out_logits, raw_masks, raw_boxes = outputs.logits, outputs.pred_masks, outputs.pred_boxes + if not len(out_logits) == len(raw_masks) == len(target_sizes): + raise ValueError( + "Make sure that you pass in as many target sizes as the batch dimension of the logits and masks" + ) + preds = [] + + def to_tuple(tup): + if isinstance(tup, tuple): + return tup + return tuple(tup.cpu().tolist()) + + for cur_logits, cur_masks, cur_boxes, size, target_size in zip( + out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes + ): + # we filter empty queries and detection below threshold + scores, labels = cur_logits.softmax(-1).max(-1) + keep = labels.ne(outputs.logits.shape[-1] - 1) & (scores > threshold) + cur_scores, cur_classes = cur_logits.softmax(-1).max(-1) + cur_scores = cur_scores[keep] + cur_classes = cur_classes[keep] + cur_masks = cur_masks[keep] + cur_masks = nn.functional.interpolate(cur_masks[:, None], to_tuple(size), mode="bilinear").squeeze(1) + cur_boxes = center_to_corners_format(cur_boxes[keep]) + + h, w = cur_masks.shape[-2:] + if len(cur_boxes) != len(cur_classes): + raise ValueError("Not as many boxes as there are classes") + + # It may be that we have several predicted masks for the same stuff class. + # In the following, we track the list of masks ids for each stuff class (they are merged later on) + cur_masks = cur_masks.flatten(1) + stuff_equiv_classes = defaultdict(lambda: []) + for k, label in enumerate(cur_classes): + if not is_thing_map[label.item()]: + stuff_equiv_classes[label.item()].append(k) + + def get_ids_area(masks, scores, dedup=False): + # This helper function creates the final panoptic segmentation image + # It also returns the area of the masks that appears on the image + + m_id = masks.transpose(0, 1).softmax(-1) + + if m_id.shape[-1] == 0: + # We didn't detect any mask :( + m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device) + else: + m_id = m_id.argmax(-1).view(h, w) + + if dedup: + # Merge the masks corresponding to the same stuff class + for equiv in stuff_equiv_classes.values(): + if len(equiv) > 1: + for eq_id in equiv: + m_id.masked_fill_(m_id.eq(eq_id), equiv[0]) + + final_h, final_w = to_tuple(target_size) + + seg_img = Image.fromarray(id_to_rgb(m_id.view(h, w).cpu().numpy())) + seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST) + + np_seg_img = torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())) + np_seg_img = np_seg_img.view(final_h, final_w, 3) + np_seg_img = np_seg_img.numpy() + + m_id = torch.from_numpy(rgb_to_id(np_seg_img)) + + area = [] + for i in range(len(scores)): + area.append(m_id.eq(i).sum().item()) + return area, seg_img + + area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True) + if cur_classes.numel() > 0: + # We know filter empty masks as long as we find some + while True: + filtered_small = torch.as_tensor( + [area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device + ) + if filtered_small.any().item(): + cur_scores = cur_scores[~filtered_small] + cur_classes = cur_classes[~filtered_small] + cur_masks = cur_masks[~filtered_small] + area, seg_img = get_ids_area(cur_masks, cur_scores) + else: + break + + else: + cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device) + + segments_info = [] + for i, a in enumerate(area): + cat = cur_classes[i].item() + segments_info.append({"id": i, "isthing": is_thing_map[cat], "category_id": cat, "area": a}) + del cur_classes + + with io.BytesIO() as out: + seg_img.save(out, format="PNG") + predictions = {"png_string": out.getvalue(), "segments_info": segments_info} + preds.append(predictions) + return preds diff --git a/src/transformers/models/conditional_detr/modeling_conditional_detr.py b/src/transformers/models/conditional_detr/modeling_conditional_detr.py new file mode 100644 index 00000000000000..79199ce06e4283 --- /dev/null +++ b/src/transformers/models/conditional_detr/modeling_conditional_detr.py @@ -0,0 +1,2626 @@ +# coding=utf-8 +# Copyright 2022 Microsoft Research Asia and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch Conditional DETR model.""" + + +import math +import random +from dataclasses import dataclass +from typing import Dict, List, Optional, Tuple + +import torch +from torch import Tensor, nn + +from ...activations import ACT2FN +from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions, Seq2SeqModelOutput +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import torch_int_div +from ...utils import ( + ModelOutput, + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_scipy_available, + is_timm_available, + is_vision_available, + logging, + replace_return_docstrings, + requires_backends, +) +from .configuration_conditional_detr import ConditionalDetrConfig + + +if is_scipy_available(): + from scipy.optimize import linear_sum_assignment + +if is_vision_available(): + from .feature_extraction_conditional_detr import center_to_corners_format + +if is_timm_available(): + from timm import create_model + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "ConditionalDetrConfig" +_CHECKPOINT_FOR_DOC = "microsoft/conditional-detr-resnet-50" + +CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "microsoft/conditional-detr-resnet-50", + # See all Conditional DETR models at https://huggingface.co/models?filter=conditional_detr +] + + +@dataclass +class ConditionalDetrDecoderOutput(BaseModelOutputWithCrossAttentions): + """ + Base class for outputs of the Conditional DETR decoder. This class adds one attribute to + BaseModelOutputWithCrossAttentions, namely an optional stack of intermediate decoder activations, i.e. the output + of each decoder layer, each of them gone through a layernorm. This is useful when training the model with auxiliary + decoding losses. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer + plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in + the self-attention heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, + used to compute the weighted average in the cross-attention heads. + intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`): + Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a + layernorm. + """ + + intermediate_hidden_states: Optional[torch.FloatTensor] = None + reference_points: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class ConditionalDetrModelOutput(Seq2SeqModelOutput): + """ + Base class for outputs of the Conditional DETR encoder-decoder model. This class adds one attribute to + Seq2SeqModelOutput, namely an optional stack of intermediate decoder activations, i.e. the output of each decoder + layer, each of them gone through a layernorm. This is useful when training the model with auxiliary decoding + losses. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the decoder of the model. + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each + layer plus the initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the + weighted average in the self-attention heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, + used to compute the weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each + layer plus the initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the + weighted average in the self-attention heads. + intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, sequence_length, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`): + Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a + layernorm. + """ + + intermediate_hidden_states: Optional[torch.FloatTensor] = None + reference_points: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +# Copied from transformers.models.detr.modeling_detr.DetrObjectDetectionOutput with Detr->ConditionalDetr +class ConditionalDetrObjectDetectionOutput(ModelOutput): + """ + Output type of [`ConditionalDetrForObjectDetection`]. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)): + Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a + bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized + scale-invariant IoU loss. + loss_dict (`Dict`, *optional*): + A dictionary containing the individual losses. Useful for logging. + logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`): + Classification logits (including no-object) for all queries. + pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): + Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These + values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding + possible padding). You can use [`~ConditionalDetrFeatureExtractor.post_process`] to retrieve the + unnormalized bounding boxes. + auxiliary_outputs (`list[Dict]`, *optional*): + Optional, only returned when auxilary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) + and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and + `pred_boxes`) for each decoder layer. + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the decoder of the model. + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each + layer plus the initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the + weighted average in the self-attention heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, + used to compute the weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each + layer plus the initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the + weighted average in the self-attention heads. + """ + + loss: Optional[torch.FloatTensor] = None + loss_dict: Optional[Dict] = None + logits: torch.FloatTensor = None + pred_boxes: torch.FloatTensor = None + auxiliary_outputs: Optional[List[Dict]] = None + last_hidden_state: Optional[torch.FloatTensor] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +# Copied from transformers.models.detr.modeling_detr.DetrSegmentationOutput with Detr->ConditionalDetr +class ConditionalDetrSegmentationOutput(ModelOutput): + """ + Output type of [`ConditionalDetrForSegmentation`]. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)): + Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a + bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized + scale-invariant IoU loss. + loss_dict (`Dict`, *optional*): + A dictionary containing the individual losses. Useful for logging. + logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`): + Classification logits (including no-object) for all queries. + pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): + Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These + values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding + possible padding). You can use [`~ConditionalDetrFeatureExtractor.post_process`] to retrieve the + unnormalized bounding boxes. + pred_masks (`torch.FloatTensor` of shape `(batch_size, num_queries, height/4, width/4)`): + Segmentation masks logits for all queries. See also + [`~ConditionalDetrFeatureExtractor.post_process_segmentation`] or + [`~ConditionalDetrFeatureExtractor.post_process_panoptic`] to evaluate instance and panoptic segmentation + masks respectively. + auxiliary_outputs (`list[Dict]`, *optional*): + Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) + and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and + `pred_boxes`) for each decoder layer. + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the decoder of the model. + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each + layer plus the initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the + weighted average in the self-attention heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, + used to compute the weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each + layer plus the initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the + weighted average in the self-attention heads. + """ + + loss: Optional[torch.FloatTensor] = None + loss_dict: Optional[Dict] = None + logits: torch.FloatTensor = None + pred_boxes: torch.FloatTensor = None + pred_masks: torch.FloatTensor = None + auxiliary_outputs: Optional[List[Dict]] = None + last_hidden_state: Optional[torch.FloatTensor] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + + +# Copied from transformers.models.detr.modeling_detr.DetrFrozenBatchNorm2d with Detr->ConditionalDetr +class ConditionalDetrFrozenBatchNorm2d(nn.Module): + """ + BatchNorm2d where the batch statistics and the affine parameters are fixed. + + Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than + torchvision.models.resnet[18,34,50,101] produce nans. + """ + + def __init__(self, n): + super().__init__() + self.register_buffer("weight", torch.ones(n)) + self.register_buffer("bias", torch.zeros(n)) + self.register_buffer("running_mean", torch.zeros(n)) + self.register_buffer("running_var", torch.ones(n)) + + def _load_from_state_dict( + self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs + ): + num_batches_tracked_key = prefix + "num_batches_tracked" + if num_batches_tracked_key in state_dict: + del state_dict[num_batches_tracked_key] + + super()._load_from_state_dict( + state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs + ) + + def forward(self, x): + # move reshapes to the beginning + # to make it user-friendly + weight = self.weight.reshape(1, -1, 1, 1) + bias = self.bias.reshape(1, -1, 1, 1) + running_var = self.running_var.reshape(1, -1, 1, 1) + running_mean = self.running_mean.reshape(1, -1, 1, 1) + epsilon = 1e-5 + scale = weight * (running_var + epsilon).rsqrt() + bias = bias - running_mean * scale + return x * scale + bias + + +# Copied from transformers.models.detr.modeling_detr.replace_batch_norm with Detr->ConditionalDetr +def replace_batch_norm(m, name=""): + for attr_str in dir(m): + target_attr = getattr(m, attr_str) + if isinstance(target_attr, nn.BatchNorm2d): + frozen = ConditionalDetrFrozenBatchNorm2d(target_attr.num_features) + bn = getattr(m, attr_str) + frozen.weight.data.copy_(bn.weight) + frozen.bias.data.copy_(bn.bias) + frozen.running_mean.data.copy_(bn.running_mean) + frozen.running_var.data.copy_(bn.running_var) + setattr(m, attr_str, frozen) + for n, ch in m.named_children(): + replace_batch_norm(ch, n) + + +# Copied from transformers.models.detr.modeling_detr.DetrTimmConvEncoder +class ConditionalDetrTimmConvEncoder(nn.Module): + """ + Convolutional encoder (backbone) from the timm library. + + nn.BatchNorm2d layers are replaced by DetrFrozenBatchNorm2d as defined above. + + """ + + def __init__(self, name: str, dilation: bool, use_pretrained_backbone: bool, num_channels: int = 3): + super().__init__() + + kwargs = {} + if dilation: + kwargs["output_stride"] = 16 + + requires_backends(self, ["timm"]) + + backbone = create_model( + name, + pretrained=use_pretrained_backbone, + features_only=True, + out_indices=(1, 2, 3, 4), + in_chans=num_channels, + **kwargs, + ) + # replace batch norm by frozen batch norm + with torch.no_grad(): + replace_batch_norm(backbone) + self.model = backbone + self.intermediate_channel_sizes = self.model.feature_info.channels() + + if "resnet" in name: + for name, parameter in self.model.named_parameters(): + if "layer2" not in name and "layer3" not in name and "layer4" not in name: + parameter.requires_grad_(False) + + def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): + # send pixel_values through the model to get list of feature maps + features = self.model(pixel_values) + + out = [] + for feature_map in features: + # downsample pixel_mask to match shape of corresponding feature_map + mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0] + out.append((feature_map, mask)) + return out + + +# Copied from transformers.models.detr.modeling_detr.DetrConvModel with Detr->ConditionalDetr +class ConditionalDetrConvModel(nn.Module): + """ + This module adds 2D position embeddings to all intermediate feature maps of the convolutional encoder. + """ + + def __init__(self, conv_encoder, position_embedding): + super().__init__() + self.conv_encoder = conv_encoder + self.position_embedding = position_embedding + + def forward(self, pixel_values, pixel_mask): + # send pixel_values and pixel_mask through backbone to get list of (feature_map, pixel_mask) tuples + out = self.conv_encoder(pixel_values, pixel_mask) + pos = [] + for feature_map, mask in out: + # position encoding + pos.append(self.position_embedding(feature_map, mask).to(feature_map.dtype)) + + return out, pos + + +# Copied from transformers.models.detr.modeling_detr._expand_mask with Detr->ConditionalDetr +def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, target_len: Optional[int] = None): + """ + Expands attention_mask from `[batch_size, seq_len]` to `[batch_size, 1, target_seq_len, source_seq_len]`. + """ + batch_size, source_len = mask.size() + target_len = target_len if target_len is not None else source_len + + expanded_mask = mask[:, None, None, :].expand(batch_size, 1, target_len, source_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min) + + +# Copied from transformers.models.detr.modeling_detr.DetrSinePositionEmbedding with Detr->ConditionalDetr +class ConditionalDetrSinePositionEmbedding(nn.Module): + """ + This is a more standard version of the position embedding, very similar to the one used by the Attention is all you + need paper, generalized to work on images. + """ + + def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None): + super().__init__() + self.embedding_dim = embedding_dim + self.temperature = temperature + self.normalize = normalize + if scale is not None and normalize is False: + raise ValueError("normalize should be True if scale is passed") + if scale is None: + scale = 2 * math.pi + self.scale = scale + + def forward(self, pixel_values, pixel_mask): + if pixel_mask is None: + raise ValueError("No pixel mask provided") + y_embed = pixel_mask.cumsum(1, dtype=torch.float32) + x_embed = pixel_mask.cumsum(2, dtype=torch.float32) + if self.normalize: + y_embed = y_embed / (y_embed[:, -1:, :] + 1e-6) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + 1e-6) * self.scale + + dim_t = torch.arange(self.embedding_dim, dtype=torch.float32, device=pixel_values.device) + dim_t = self.temperature ** (2 * torch_int_div(dim_t, 2) / self.embedding_dim) + + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + return pos + + +# Copied from transformers.models.detr.modeling_detr.DetrLearnedPositionEmbedding with Detr->ConditionalDetr +class ConditionalDetrLearnedPositionEmbedding(nn.Module): + """ + This module learns positional embeddings up to a fixed maximum size. + """ + + def __init__(self, embedding_dim=256): + super().__init__() + self.row_embeddings = nn.Embedding(50, embedding_dim) + self.column_embeddings = nn.Embedding(50, embedding_dim) + + def forward(self, pixel_values, pixel_mask=None): + height, width = pixel_values.shape[-2:] + width_values = torch.arange(width, device=pixel_values.device) + height_values = torch.arange(height, device=pixel_values.device) + x_emb = self.column_embeddings(width_values) + y_emb = self.row_embeddings(height_values) + pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1) + pos = pos.permute(2, 0, 1) + pos = pos.unsqueeze(0) + pos = pos.repeat(pixel_values.shape[0], 1, 1, 1) + return pos + + +# Copied from transformers.models.detr.modeling_detr.build_position_encoding with Detr->ConditionalDetr +def build_position_encoding(config): + n_steps = config.d_model // 2 + if config.position_embedding_type == "sine": + # TODO find a better way of exposing other arguments + position_embedding = ConditionalDetrSinePositionEmbedding(n_steps, normalize=True) + elif config.position_embedding_type == "learned": + position_embedding = ConditionalDetrLearnedPositionEmbedding(n_steps) + else: + raise ValueError(f"Not supported {config.position_embedding_type}") + + return position_embedding + + +# function to generate sine positional embedding for 2d coordinates +def gen_sine_position_embeddings(pos_tensor): + scale = 2 * math.pi + dim_t = torch.arange(128, dtype=torch.float32, device=pos_tensor.device) + dim_t = 10000 ** (2 * (dim_t // 2) / 128) + x_embed = pos_tensor[:, :, 0] * scale + y_embed = pos_tensor[:, :, 1] * scale + pos_x = x_embed[:, :, None] / dim_t + pos_y = y_embed[:, :, None] / dim_t + pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2) + pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2) + pos = torch.cat((pos_y, pos_x), dim=2) + return pos + + +def inverse_sigmoid(x, eps=1e-5): + x = x.clamp(min=0, max=1) + x1 = x.clamp(min=eps) + x2 = (1 - x).clamp(min=eps) + return torch.log(x1 / x2) + + +# Copied from transformers.models.detr.modeling_detr.DetrAttention +class DetrAttention(nn.Module): + """ + Multi-headed attention from 'Attention Is All You Need' paper. + + Here, we add position embeddings to the queries and keys (as explained in the DETR paper). + """ + + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + is_decoder: bool = False, + bias: bool = True, + ): + super().__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + if self.head_dim * num_heads != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" + f" {num_heads})." + ) + self.scaling = self.head_dim**-0.5 + + self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + + def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): + return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): + return tensor if position_embeddings is None else tensor + position_embeddings + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_embeddings: Optional[torch.Tensor] = None, + key_value_states: Optional[torch.Tensor] = None, + key_value_position_embeddings: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + batch_size, target_len, embed_dim = hidden_states.size() + + # add position embeddings to the hidden states before projecting to queries and keys + if position_embeddings is not None: + hidden_states_original = hidden_states + hidden_states = self.with_pos_embed(hidden_states, position_embeddings) + + # add key-value position embeddings to the key value states + if key_value_position_embeddings is not None: + key_value_states_original = key_value_states + key_value_states = self.with_pos_embed(key_value_states, key_value_position_embeddings) + + # get query proj + query_states = self.q_proj(hidden_states) * self.scaling + # get key, value proj + if is_cross_attention: + # cross_attentions + key_states = self._shape(self.k_proj(key_value_states), -1, batch_size) + value_states = self._shape(self.v_proj(key_value_states_original), -1, batch_size) + else: + # self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, batch_size) + value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size) + + proj_shape = (batch_size * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape) + key_states = key_states.view(*proj_shape) + value_states = value_states.view(*proj_shape) + + source_len = key_states.size(1) + + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len): + raise ValueError( + f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (batch_size, 1, target_len, source_len): + raise ValueError( + f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is" + f" {attention_mask.size()}" + ) + attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask + attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if output_attentions: + # this operation is a bit awkward, but it's required to + # make sure that attn_weights keeps its gradient. + # In order to do so, attn_weights have to reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + attn_output = attn_output.reshape(batch_size, target_len, embed_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped + + +class ConditionalDetrAttention(nn.Module): + """ + Cross-Attention used in Conditional DETR 'Conditional DETR for Fast Training Convergence' paper. + + The key q_proj, k_proj, v_proj are defined outside the attention. This attention allows the dim of q, k to be + different to v. + """ + + def __init__( + self, + embed_dim: int, + out_dim: int, + num_heads: int, + dropout: float = 0.0, + bias: bool = True, + ): + super().__init__() + self.embed_dim = embed_dim + self.out_dim = out_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + if self.head_dim * num_heads != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" + f" {num_heads})." + ) + # head dimension of values + self.v_head_dim = out_dim // num_heads + if self.v_head_dim * num_heads != self.out_dim: + raise ValueError( + f"out_dim must be divisible by num_heads (got `out_dim`: {self.out_dim} and `num_heads`: {num_heads})." + ) + self.scaling = self.head_dim**-0.5 + + self.out_proj = nn.Linear(out_dim, out_dim, bias=bias) + + def _qk_shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def _v_shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.v_head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + key_states: Optional[torch.Tensor] = None, + value_states: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + bsz, tgt_len, _ = hidden_states.size() + + # get query proj + query_states = hidden_states * self.scaling + # get key, value proj + key_states = self._qk_shape(key_states, -1, bsz) + value_states = self._v_shape(value_states, -1, bsz) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + v_proj_shape = (bsz * self.num_heads, -1, self.v_head_dim) + query_states = self._qk_shape(query_states, tgt_len, bsz).view(*proj_shape) + key_states = key_states.view(*proj_shape) + value_states = value_states.view(*v_proj_shape) + + src_len = key_states.size(1) + + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if output_attentions: + # this operation is a bit awkward, but it's required to + # make sure that attn_weights keeps its gradient. + # In order to do so, attn_weights have to reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (bsz * self.num_heads, tgt_len, self.v_head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.v_head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.v_head_dim) + attn_output = attn_output.transpose(1, 2) + attn_output = attn_output.reshape(bsz, tgt_len, self.out_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped + + +# Copied from transformers.models.detr.modeling_detr.DetrEncoderLayer with DetrEncoderLayer->ConditionalDetrEncoderLayer,DetrConfig->ConditionalDetrConfig +class ConditionalDetrEncoderLayer(nn.Module): + def __init__(self, config: ConditionalDetrConfig): + super().__init__() + self.embed_dim = config.d_model + self.self_attn = DetrAttention( + embed_dim=self.embed_dim, + num_heads=config.encoder_attention_heads, + dropout=config.attention_dropout, + ) + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) + self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: torch.Tensor, + position_embeddings: torch.Tensor = None, + output_attentions: bool = False, + ): + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` + attention_mask (`torch.FloatTensor`): attention mask of size + `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative + values. + position_embeddings (`torch.FloatTensor`, *optional*): position embeddings, to be added to hidden_states. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + """ + residual = hidden_states + hidden_states, attn_weights = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_embeddings=position_embeddings, + output_attentions=output_attentions, + ) + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + if self.training: + if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any(): + clamp_value = torch.finfo(hidden_states.dtype).max - 1000 + hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +class ConditionalDetrDecoderLayer(nn.Module): + def __init__(self, config: ConditionalDetrConfig): + super().__init__() + self.embed_dim = config.d_model + + d_model = config.d_model + # Decoder Self-Attention projections + self.sa_qcontent_proj = nn.Linear(d_model, d_model) + self.sa_qpos_proj = nn.Linear(d_model, d_model) + self.sa_kcontent_proj = nn.Linear(d_model, d_model) + self.sa_kpos_proj = nn.Linear(d_model, d_model) + self.sa_v_proj = nn.Linear(d_model, d_model) + + self.self_attn = ConditionalDetrAttention( + embed_dim=self.embed_dim, + out_dim=self.embed_dim, + num_heads=config.decoder_attention_heads, + dropout=config.attention_dropout, + ) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + + # Decoder Cross-Attention projections + self.ca_qcontent_proj = nn.Linear(d_model, d_model) + self.ca_qpos_proj = nn.Linear(d_model, d_model) + self.ca_kcontent_proj = nn.Linear(d_model, d_model) + self.ca_kpos_proj = nn.Linear(d_model, d_model) + self.ca_v_proj = nn.Linear(d_model, d_model) + self.ca_qpos_sine_proj = nn.Linear(d_model, d_model) + + self.encoder_attn = ConditionalDetrAttention( + self.embed_dim * 2, self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout + ) + self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) + self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + self.nhead = config.decoder_attention_heads + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_embeddings: Optional[torch.Tensor] = None, + query_position_embeddings: Optional[torch.Tensor] = None, + query_sine_embed: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = False, + is_first: Optional[bool] = False, + ): + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` + attention_mask (`torch.FloatTensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + position_embeddings (`torch.FloatTensor`, *optional*): + position embeddings that are added to the queries and keys + in the cross-attention layer. + query_position_embeddings (`torch.FloatTensor`, *optional*): + position embeddings that are added to the queries and keys + in the self-attention layer. + encoder_hidden_states (`torch.FloatTensor`): + cross attention input to the layer of shape `(seq_len, batch, embed_dim)` + encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + """ + residual = hidden_states + + # ========== Begin of Self-Attention ============= + # Apply projections here + # shape: num_queries x batch_size x 256 + q_content = self.sa_qcontent_proj( + hidden_states + ) # target is the input of the first decoder layer. zero by default. + q_pos = self.sa_qpos_proj(query_position_embeddings) + k_content = self.sa_kcontent_proj(hidden_states) + k_pos = self.sa_kpos_proj(query_position_embeddings) + v = self.sa_v_proj(hidden_states) + + _, num_queries, n_model = q_content.shape + + q = q_content + q_pos + k = k_content + k_pos + hidden_states, self_attn_weights = self.self_attn( + hidden_states=q, + attention_mask=attention_mask, + key_states=k, + value_states=v, + output_attentions=output_attentions, + ) + # ============ End of Self-Attention ============= + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + # ========== Begin of Cross-Attention ============= + # Apply projections here + # shape: num_queries x batch_size x 256 + q_content = self.ca_qcontent_proj(hidden_states) + k_content = self.ca_kcontent_proj(encoder_hidden_states) + v = self.ca_v_proj(encoder_hidden_states) + + batch_size, num_queries, n_model = q_content.shape + _, src_len, _ = k_content.shape + + k_pos = self.ca_kpos_proj(position_embeddings) + + # For the first decoder layer, we concatenate the positional embedding predicted from + # the object query (the positional embedding) into the original query (key) in DETR. + if is_first: + q_pos = self.ca_qpos_proj(query_position_embeddings) + q = q_content + q_pos + k = k_content + k_pos + else: + q = q_content + k = k_content + + q = q.view(batch_size, num_queries, self.nhead, n_model // self.nhead) + query_sine_embed = self.ca_qpos_sine_proj(query_sine_embed) + query_sine_embed = query_sine_embed.view(batch_size, num_queries, self.nhead, n_model // self.nhead) + q = torch.cat([q, query_sine_embed], dim=3).view(batch_size, num_queries, n_model * 2) + k = k.view(batch_size, src_len, self.nhead, n_model // self.nhead) + k_pos = k_pos.view(batch_size, src_len, self.nhead, n_model // self.nhead) + k = torch.cat([k, k_pos], dim=3).view(batch_size, src_len, n_model * 2) + + # Cross-Attention Block + cross_attn_weights = None + if encoder_hidden_states is not None: + residual = hidden_states + + hidden_states, cross_attn_weights = self.encoder_attn( + hidden_states=q, + attention_mask=encoder_attention_mask, + key_states=k, + value_states=v, + output_attentions=output_attentions, + ) + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.encoder_attn_layer_norm(hidden_states) + + # ============ End of Cross-Attention ============= + + # Fully Connected + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights, cross_attn_weights) + + return outputs + + +# Copied from transformers.models.detr.modeling_detr.DetrClassificationHead with Detr->ConditionalDetr +class ConditionalDetrClassificationHead(nn.Module): + """Head for sentence-level classification tasks.""" + + def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float): + super().__init__() + self.dense = nn.Linear(input_dim, inner_dim) + self.dropout = nn.Dropout(p=pooler_dropout) + self.out_proj = nn.Linear(inner_dim, num_classes) + + def forward(self, hidden_states: torch.Tensor): + hidden_states = self.dropout(hidden_states) + hidden_states = self.dense(hidden_states) + hidden_states = torch.tanh(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.out_proj(hidden_states) + return hidden_states + + +# Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead with DetrMLPPredictionHead->MLP +class MLP(nn.Module): + """ + Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates, + height and width of a bounding box w.r.t. an image. + + Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py + + """ + + def __init__(self, input_dim, hidden_dim, output_dim, num_layers): + super().__init__() + self.num_layers = num_layers + h = [hidden_dim] * (num_layers - 1) + self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) + + def forward(self, x): + for i, layer in enumerate(self.layers): + x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x) + return x + + +# Copied from transformers.models.detr.modeling_detr.DetrPreTrainedModel with Detr->ConditionalDetr +class ConditionalDetrPreTrainedModel(PreTrainedModel): + config_class = ConditionalDetrConfig + base_model_prefix = "model" + main_input_name = "pixel_values" + + def _init_weights(self, module): + std = self.config.init_std + xavier_std = self.config.init_xavier_std + + if isinstance(module, ConditionalDetrMHAttentionMap): + nn.init.zeros_(module.k_linear.bias) + nn.init.zeros_(module.q_linear.bias) + nn.init.xavier_uniform_(module.k_linear.weight, gain=xavier_std) + nn.init.xavier_uniform_(module.q_linear.weight, gain=xavier_std) + elif isinstance(module, ConditionalDetrLearnedPositionEmbedding): + nn.init.uniform_(module.row_embeddings.weight) + nn.init.uniform_(module.column_embeddings.weight) + if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, ConditionalDetrDecoder): + module.gradient_checkpointing = value + + +CONDITIONAL_DETR_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`ConditionalDetrConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +CONDITIONAL_DETR_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Padding will be ignored by default should you provide it. + + Pixel values can be obtained using [`ConditionalDetrFeatureExtractor`]. See + [`ConditionalDetrFeatureExtractor.__call__`] for details. + + pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): + Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: + + - 1 for pixels that are real (i.e. **not masked**), + - 0 for pixels that are padding (i.e. **masked**). + + [What are attention masks?](../glossary#attention-mask) + + decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, num_queries)`, *optional*): + Not used by default. Can be used to mask object queries. + encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): + Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) + `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of + hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you + can choose to directly pass a flattened representation of an image. + decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): + Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an + embedded representation. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +# Copied from transformers.models.detr.modeling_detr.DetrEncoder with Detr->ConditionalDetr,DETR->ConditionalDETR +class ConditionalDetrEncoder(ConditionalDetrPreTrainedModel): + """ + Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a + [`ConditionalDetrEncoderLayer`]. + + The encoder updates the flattened feature map through multiple self-attention layers. + + Small tweak for ConditionalDETR: + + - position_embeddings are added to the forward pass. + + Args: + config: ConditionalDetrConfig + """ + + def __init__(self, config: ConditionalDetrConfig): + super().__init__(config) + + self.dropout = config.dropout + self.layerdrop = config.encoder_layerdrop + + self.layers = nn.ModuleList([ConditionalDetrEncoderLayer(config) for _ in range(config.encoder_layers)]) + + # in the original ConditionalDETR, no layernorm is used at the end of the encoder, as "normalize_before" is set to False by default + + # Initialize weights and apply final processing + self.post_init() + + def forward( + self, + inputs_embeds=None, + attention_mask=None, + position_embeddings=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Args: + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Flattened feature map (output of the backbone + projection layer) that is passed to the encoder. + + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`: + + - 1 for pixel features that are real (i.e. **not masked**), + - 0 for pixel features that are padding (i.e. **masked**). + + [What are attention masks?](../glossary#attention-mask) + + position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Position embeddings that are added to the queries and keys in each self-attention layer. + + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + hidden_states = inputs_embeds + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + # expand attention_mask + if attention_mask is not None: + # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] + attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype) + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + for i, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + dropout_probability = random.uniform(0, 1) + if self.training and (dropout_probability < self.layerdrop): # skip the layer + layer_outputs = (None, None) + else: + # we add position_embeddings as extra input to the encoder_layer + layer_outputs = encoder_layer( + hidden_states, + attention_mask, + position_embeddings=position_embeddings, + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions + ) + + +class ConditionalDetrDecoder(ConditionalDetrPreTrainedModel): + """ + Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`ConditionalDetrDecoderLayer`]. + + The decoder updates the query embeddings through multiple self-attention and cross-attention layers. + + Some small tweaks for Conditional DETR: + + - position_embeddings and query_position_embeddings are added to the forward pass. + - if self.config.auxiliary_loss is set to True, also returns a stack of activations from all decoding layers. + + Args: + config: ConditionalDetrConfig + """ + + def __init__(self, config: ConditionalDetrConfig): + super().__init__(config) + self.dropout = config.dropout + self.layerdrop = config.decoder_layerdrop + + self.layers = nn.ModuleList([ConditionalDetrDecoderLayer(config) for _ in range(config.decoder_layers)]) + # in Conditional DETR, the decoder uses layernorm after the last decoder layer output + self.layernorm = nn.LayerNorm(config.d_model) + d_model = config.d_model + self.gradient_checkpointing = False + + # query_scale is the FFN applied on f to generate transformation T + self.query_scale = MLP(d_model, d_model, d_model, 2) + self.ref_point_head = MLP(d_model, d_model, 2, 2) + for layer_id in range(config.decoder_layers - 1): + self.layers[layer_id + 1].ca_qpos_proj = None + + # Initialize weights and apply final processing + self.post_init() + + def forward( + self, + inputs_embeds=None, + attention_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + position_embeddings=None, + query_position_embeddings=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Args: + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + The query embeddings that are passed into the decoder. + + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on certain queries. Mask values selected in `[0, 1]`: + + - 1 for queries that are **not masked**, + - 0 for queries that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention + of the decoder. + encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): + Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected + in `[0, 1]`: + + - 1 for pixels that are real (i.e. **not masked**), + - 0 for pixels that are padding (i.e. **masked**). + + position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Position embeddings that are added to the queries and keys in each cross-attention layer. + query_position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`): + , *optional*): Position embeddings that are added to the queries and keys in each self-attention layer. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if inputs_embeds is not None: + hidden_states = inputs_embeds + input_shape = inputs_embeds.size()[:-1] + + combined_attention_mask = None + + if attention_mask is not None and combined_attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + combined_attention_mask = combined_attention_mask + _expand_mask( + attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] + ) + + # expand encoder attention mask + if encoder_hidden_states is not None and encoder_attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + encoder_attention_mask = _expand_mask( + encoder_attention_mask, inputs_embeds.dtype, target_len=input_shape[-1] + ) + + # optional intermediate hidden states + intermediate = () if self.config.auxiliary_loss else None + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None + + reference_points_before_sigmoid = self.ref_point_head( + query_position_embeddings + ) # [num_queries, batch_size, 2] + reference_points = reference_points_before_sigmoid.sigmoid().transpose(0, 1) + obj_center = reference_points[..., :2].transpose(0, 1) + # get sine embedding for the query vector + query_sine_embed_before_transformation = gen_sine_position_embeddings(obj_center) + + for idx, decoder_layer in enumerate(self.layers): + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + if output_hidden_states: + all_hidden_states += (hidden_states,) + dropout_probability = random.uniform(0, 1) + if self.training and (dropout_probability < self.layerdrop): + continue + if idx == 0: + pos_transformation = 1 + else: + pos_transformation = self.query_scale(hidden_states) + # apply transformation + query_sine_embed = query_sine_embed_before_transformation * pos_transformation + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(decoder_layer), + hidden_states, + combined_attention_mask, + position_embeddings, + query_position_embeddings, + query_sine_embed, + encoder_hidden_states, + encoder_attention_mask, + None, + None, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=combined_attention_mask, + position_embeddings=position_embeddings, + query_position_embeddings=query_position_embeddings, + query_sine_embed=query_sine_embed, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + is_first=(idx == 0), + ) + + hidden_states = layer_outputs[0] + + if self.config.auxiliary_loss: + hidden_states = self.layernorm(hidden_states) + intermediate += (hidden_states,) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + if encoder_hidden_states is not None: + all_cross_attentions += (layer_outputs[2],) + + # finally, apply layernorm + hidden_states = self.layernorm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + # stack intermediate decoder activations + if self.config.auxiliary_loss: + intermediate = torch.stack(intermediate) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + all_hidden_states, + all_self_attns, + all_cross_attentions, + intermediate, + reference_points, + ] + if v is not None + ) + return ConditionalDetrDecoderOutput( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_self_attns, + cross_attentions=all_cross_attentions, + intermediate_hidden_states=intermediate, + reference_points=reference_points, + ) + + +@add_start_docstrings( + """ + The bare Conditional DETR Model (consisting of a backbone and encoder-decoder Transformer) outputting raw + hidden-states without any specific head on top. + """, + CONDITIONAL_DETR_START_DOCSTRING, +) +class ConditionalDetrModel(ConditionalDetrPreTrainedModel): + def __init__(self, config: ConditionalDetrConfig): + super().__init__(config) + + # Create backbone + positional encoding + backbone = ConditionalDetrTimmConvEncoder( + config.backbone, config.dilation, config.use_pretrained_backbone, config.num_channels + ) + position_embeddings = build_position_encoding(config) + self.backbone = ConditionalDetrConvModel(backbone, position_embeddings) + + # Create projection layer + self.input_projection = nn.Conv2d(backbone.intermediate_channel_sizes[-1], config.d_model, kernel_size=1) + + self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model) + + self.encoder = ConditionalDetrEncoder(config) + self.decoder = ConditionalDetrDecoder(config) + + # Initialize weights and apply final processing + self.post_init() + + def get_encoder(self): + return self.encoder + + def get_decoder(self): + return self.decoder + + def freeze_backbone(self): + for name, param in self.backbone.conv_encoder.model.named_parameters(): + param.requires_grad_(False) + + def unfreeze_backbone(self): + for name, param in self.backbone.conv_encoder.model.named_parameters(): + param.requires_grad_(True) + + @add_start_docstrings_to_model_forward(CONDITIONAL_DETR_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=ConditionalDetrModelOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values, + pixel_mask=None, + decoder_attention_mask=None, + encoder_outputs=None, + inputs_embeds=None, + decoder_inputs_embeds=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Returns: + + Examples: + + ```python + >>> from transformers import ConditionalDetrFeatureExtractor, ConditionalDetrModel + >>> from PIL import Image + >>> import requests + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> feature_extractor = ConditionalDetrFeatureExtractor.from_pretrained("microsoft/conditional-detr-resnet-50") + >>> model = ConditionalDetrModel.from_pretrained("microsoft/conditional-detr-resnet-50") + + >>> # prepare image for the model + >>> inputs = feature_extractor(images=image, return_tensors="pt") + + >>> # forward pass + >>> outputs = model(**inputs) + + >>> # the last hidden states are the final query embeddings of the Transformer decoder + >>> # these are of shape (batch_size, num_queries, hidden_size) + >>> last_hidden_states = outputs.last_hidden_state + >>> list(last_hidden_states.shape) + [1, 300, 256] + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + batch_size, num_channels, height, width = pixel_values.shape + device = pixel_values.device + + if pixel_mask is None: + pixel_mask = torch.ones(((batch_size, height, width)), device=device) + + # First, sent pixel_values + pixel_mask through Backbone to obtain the features + # pixel_values should be of shape (batch_size, num_channels, height, width) + # pixel_mask should be of shape (batch_size, height, width) + features, position_embeddings_list = self.backbone(pixel_values, pixel_mask) + + # get final feature map and downsampled mask + feature_map, mask = features[-1] + + if mask is None: + raise ValueError("Backbone does not return downsampled pixel mask") + + # Second, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default) + projected_feature_map = self.input_projection(feature_map) + + # Third, flatten the feature map + position embeddings of shape NxCxHxW to NxCxHW, and permute it to NxHWxC + # In other words, turn their shape into (batch_size, sequence_length, hidden_size) + flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1) + position_embeddings = position_embeddings_list[-1].flatten(2).permute(0, 2, 1) + + flattened_mask = mask.flatten(1) + + # Fourth, sent flattened_features + flattened_mask + position embeddings through encoder + # flattened_features is a Tensor of shape (batch_size, heigth*width, hidden_size) + # flattened_mask is a Tensor of shape (batch_size, heigth*width) + if encoder_outputs is None: + encoder_outputs = self.encoder( + inputs_embeds=flattened_features, + attention_mask=flattened_mask, + position_embeddings=position_embeddings, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True + elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): + encoder_outputs = BaseModelOutput( + last_hidden_state=encoder_outputs[0], + hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, + attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, + ) + + # Fifth, sent query embeddings + position embeddings through the decoder (which is conditioned on the encoder output) + query_position_embeddings = self.query_position_embeddings.weight.unsqueeze(0).repeat(batch_size, 1, 1) + queries = torch.zeros_like(query_position_embeddings) + + # decoder outputs consists of (dec_features, dec_hidden, dec_attn) + decoder_outputs = self.decoder( + inputs_embeds=queries, + attention_mask=None, + position_embeddings=position_embeddings, + query_position_embeddings=query_position_embeddings, + encoder_hidden_states=encoder_outputs[0], + encoder_attention_mask=flattened_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + if not return_dict: + return decoder_outputs + encoder_outputs + + return ConditionalDetrModelOutput( + last_hidden_state=decoder_outputs.last_hidden_state, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + intermediate_hidden_states=decoder_outputs.intermediate_hidden_states, + reference_points=decoder_outputs.reference_points, + ) + + +@add_start_docstrings( + """ + CONDITIONAL_DETR Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on + top, for tasks such as COCO detection. + """, + CONDITIONAL_DETR_START_DOCSTRING, +) +class ConditionalDetrForObjectDetection(ConditionalDetrPreTrainedModel): + def __init__(self, config: ConditionalDetrConfig): + super().__init__(config) + + # CONDITIONAL DETR encoder-decoder model + self.model = ConditionalDetrModel(config) + + # Object detection heads + self.class_labels_classifier = nn.Linear( + config.d_model, config.num_labels + ) # We add one for the "no object" class + self.bbox_predictor = ConditionalDetrMLPPredictionHead( + input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3 + ) + + # Initialize weights and apply final processing + self.post_init() + + # taken from https://github.com/Atten4Vis/conditionalDETR/blob/master/models/conditional_detr.py + @torch.jit.unused + def _set_aux_loss(self, outputs_class, outputs_coord): + # this is a workaround to make torchscript happy, as torchscript + # doesn't support dictionary with non-homogeneous values, such + # as a dict having both a Tensor and a list. + return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] + + @add_start_docstrings_to_model_forward(CONDITIONAL_DETR_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=ConditionalDetrObjectDetectionOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values, + pixel_mask=None, + decoder_attention_mask=None, + encoder_outputs=None, + inputs_embeds=None, + decoder_inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + labels (`List[Dict]` of len `(batch_size,)`, *optional*): + Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the + following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch + respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes + in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`. + + Returns: + + Examples: + + ```python + >>> from transformers import ConditionalDetrFeatureExtractor, ConditionalDetrForObjectDetection + >>> from PIL import Image + >>> import requests + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> feature_extractor = ConditionalDetrFeatureExtractor.from_pretrained("microsoft/conditional-detr-resnet-50") + >>> model = ConditionalDetrForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50") + + >>> inputs = feature_extractor(images=image, return_tensors="pt") + >>> outputs = model(**inputs) + >>> # model predicts bounding boxes and corresponding COCO classes + >>> logits = outputs.logits + >>> bboxes = outputs.pred_boxes + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # First, sent images through CONDITIONAL_DETR base model to obtain encoder + decoder outputs + outputs = self.model( + pixel_values, + pixel_mask=pixel_mask, + decoder_attention_mask=decoder_attention_mask, + encoder_outputs=encoder_outputs, + inputs_embeds=inputs_embeds, + decoder_inputs_embeds=decoder_inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + # class logits + predicted bounding boxes + logits = self.class_labels_classifier(sequence_output) + + reference = outputs.reference_points if return_dict else outputs[-1] + reference_before_sigmoid = inverse_sigmoid(reference).transpose(0, 1) + outputs_coords = [] + hs = sequence_output + tmp = self.bbox_predictor(hs) + tmp[..., :2] += reference_before_sigmoid + pred_boxes = tmp.sigmoid() + # pred_boxes = self.bbox_predictor(sequence_output).sigmoid() + + loss, loss_dict, auxiliary_outputs = None, None, None + if labels is not None: + # First: create the matcher + matcher = ConditionalDetrHungarianMatcher( + class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost + ) + # Second: create the criterion + losses = ["labels", "boxes", "cardinality"] + criterion = ConditionalDetrLoss( + matcher=matcher, + num_classes=self.config.num_labels, + focal_alpha=self.config.focal_alpha, + losses=losses, + ) + criterion.to(self.device) + # Third: compute the losses, based on outputs and labels + outputs_loss = {} + outputs_loss["logits"] = logits + outputs_loss["pred_boxes"] = pred_boxes + if self.config.auxiliary_loss: + intermediate = outputs.intermediate_hidden_states if return_dict else outputs[4] + outputs_class = self.class_labels_classifier(intermediate) + + for lvl in range(hs.shape[0]): + tmp = self.bbox_predictor(hs[lvl]) + tmp[..., :2] += reference_before_sigmoid + outputs_coord = tmp.sigmoid() + outputs_coords.append(outputs_coord) + outputs_coord = torch.stack(outputs_coords) + + auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord) + outputs_loss["auxiliary_outputs"] = auxiliary_outputs + + loss_dict = criterion(outputs_loss, labels) + # Fourth: compute total loss, as a weighted sum of the various losses + weight_dict = {"loss_ce": self.config.cls_loss_coefficient, "loss_bbox": self.config.bbox_loss_coefficient} + weight_dict["loss_giou"] = self.config.giou_loss_coefficient + if self.config.auxiliary_loss: + aux_weight_dict = {} + for i in range(self.config.decoder_layers - 1): + aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) + weight_dict.update(aux_weight_dict) + loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) + + if not return_dict: + if auxiliary_outputs is not None: + output = (logits, pred_boxes) + auxiliary_outputs + outputs + else: + output = (logits, pred_boxes) + outputs + return ((loss, loss_dict) + output) if loss is not None else output + + return ConditionalDetrObjectDetectionOutput( + loss=loss, + loss_dict=loss_dict, + logits=logits, + pred_boxes=pred_boxes, + auxiliary_outputs=auxiliary_outputs, + last_hidden_state=outputs.last_hidden_state, + decoder_hidden_states=outputs.decoder_hidden_states, + decoder_attentions=outputs.decoder_attentions, + cross_attentions=outputs.cross_attentions, + encoder_last_hidden_state=outputs.encoder_last_hidden_state, + encoder_hidden_states=outputs.encoder_hidden_states, + encoder_attentions=outputs.encoder_attentions, + ) + + +@add_start_docstrings( + """ + CONDITIONAL_DETR Model (consisting of a backbone and encoder-decoder Transformer) with a segmentation head on top, + for tasks such as COCO panoptic. + + """, + CONDITIONAL_DETR_START_DOCSTRING, +) +class ConditionalDetrForSegmentation(ConditionalDetrPreTrainedModel): + def __init__(self, config: ConditionalDetrConfig): + super().__init__(config) + + # object detection model + self.conditional_detr = ConditionalDetrForObjectDetection(config) + + # segmentation head + hidden_size, number_of_heads = config.d_model, config.encoder_attention_heads + intermediate_channel_sizes = self.conditional_detr.model.backbone.conv_encoder.intermediate_channel_sizes + + self.mask_head = ConditionalDetrMaskHeadSmallConv( + hidden_size + number_of_heads, intermediate_channel_sizes[::-1][-3:], hidden_size + ) + + self.bbox_attention = ConditionalDetrMHAttentionMap( + hidden_size, hidden_size, number_of_heads, dropout=0.0, std=config.init_xavier_std + ) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(CONDITIONAL_DETR_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=ConditionalDetrSegmentationOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values, + pixel_mask=None, + decoder_attention_mask=None, + encoder_outputs=None, + inputs_embeds=None, + decoder_inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + labels (`List[Dict]` of len `(batch_size,)`, *optional*): + Labels for computing the bipartite matching loss, DICE/F-1 loss and Focal loss. List of dicts, each + dictionary containing at least the following 3 keys: 'class_labels', 'boxes' and 'masks' (the class labels, + bounding boxes and segmentation masks of an image in the batch respectively). The class labels themselves + should be a `torch.LongTensor` of len `(number of bounding boxes in the image,)`, the boxes a + `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)` and the masks a + `torch.FloatTensor` of shape `(number of bounding boxes in the image, height, width)`. + + Returns: + + Examples: + + ```python + >>> import io + >>> import requests + >>> from PIL import Image + >>> import torch + >>> import numpy + + >>> from transformers import ConditionalDetrFeatureExtractor, ConditionalDetrForSegmentation + >>> from transformers.models.conditional_detr.feature_extraction_conditional_detr import rgb_to_id + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> feature_extractor = ConditionalDetrFeatureExtractor.from_pretrained( + ... "facebook/conditional_detr-resnet-50-panoptic" + ... ) + >>> model = ConditionalDetrForSegmentation.from_pretrained("facebook/conditional_detr-resnet-50-panoptic") + + >>> # prepare image for the model + >>> inputs = feature_extractor(images=image, return_tensors="pt") + + >>> # forward pass + >>> outputs = model(**inputs) + + >>> # use the `post_process_panoptic` method of `ConditionalDetrFeatureExtractor` to convert to COCO format + >>> processed_sizes = torch.as_tensor(inputs["pixel_values"].shape[-2:]).unsqueeze(0) + >>> result = feature_extractor.post_process_panoptic(outputs, processed_sizes)[0] + + >>> # the segmentation is stored in a special-format png + >>> panoptic_seg = Image.open(io.BytesIO(result["png_string"])) + >>> panoptic_seg = numpy.array(panoptic_seg, dtype=numpy.uint8) + >>> # retrieve the ids corresponding to each mask + >>> panoptic_seg_id = rgb_to_id(panoptic_seg) + >>> panoptic_seg_id.shape + (800, 1066) + ```""" + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + batch_size, num_channels, height, width = pixel_values.shape + device = pixel_values.device + + if pixel_mask is None: + pixel_mask = torch.ones((batch_size, height, width), device=device) + + # First, get list of feature maps and position embeddings + features, position_embeddings_list = self.conditional_detr.model.backbone(pixel_values, pixel_mask=pixel_mask) + + # Second, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default) + feature_map, mask = features[-1] + batch_size, num_channels, height, width = feature_map.shape + projected_feature_map = self.conditional_detr.model.input_projection(feature_map) + + # Third, flatten the feature map + position embeddings of shape NxCxHxW to NxCxHW, and permute it to NxHWxC + # In other words, turn their shape into (batch_size, sequence_length, hidden_size) + flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1) + position_embeddings = position_embeddings_list[-1].flatten(2).permute(0, 2, 1) + + flattened_mask = mask.flatten(1) + + # Fourth, sent flattened_features + flattened_mask + position embeddings through encoder + # flattened_features is a Tensor of shape (batch_size, heigth*width, hidden_size) + # flattened_mask is a Tensor of shape (batch_size, heigth*width) + if encoder_outputs is None: + encoder_outputs = self.conditional_detr.model.encoder( + inputs_embeds=flattened_features, + attention_mask=flattened_mask, + position_embeddings=position_embeddings, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True + elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): + encoder_outputs = BaseModelOutput( + last_hidden_state=encoder_outputs[0], + hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, + attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, + ) + + # Fifth, sent query embeddings + position embeddings through the decoder (which is conditioned on the encoder output) + query_position_embeddings = self.conditional_detr.model.query_position_embeddings.weight.unsqueeze(0).repeat( + batch_size, 1, 1 + ) + queries = torch.zeros_like(query_position_embeddings) + + # decoder outputs consists of (dec_features, dec_hidden, dec_attn) + decoder_outputs = self.conditional_detr.model.decoder( + inputs_embeds=queries, + attention_mask=None, + position_embeddings=position_embeddings, + query_position_embeddings=query_position_embeddings, + encoder_hidden_states=encoder_outputs[0], + encoder_attention_mask=flattened_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = decoder_outputs[0] + + # Sixth, compute logits, pred_boxes and pred_masks + logits = self.conditional_detr.class_labels_classifier(sequence_output) + pred_boxes = self.conditional_detr.bbox_predictor(sequence_output).sigmoid() + + memory = encoder_outputs[0].permute(0, 2, 1).view(batch_size, self.config.d_model, height, width) + mask = flattened_mask.view(batch_size, height, width) + + # FIXME h_boxes takes the last one computed, keep this in mind + # important: we need to reverse the mask, since in the original implementation the mask works reversed + # bbox_mask is of shape (batch_size, num_queries, number_of_attention_heads in bbox_attention, height/32, width/32) + bbox_mask = self.bbox_attention(sequence_output, memory, mask=~mask) + + seg_masks = self.mask_head(projected_feature_map, bbox_mask, [features[2][0], features[1][0], features[0][0]]) + + pred_masks = seg_masks.view( + batch_size, self.conditional_detr.config.num_queries, seg_masks.shape[-2], seg_masks.shape[-1] + ) + + loss, loss_dict, auxiliary_outputs = None, None, None + if labels is not None: + # First: create the matcher + matcher = ConditionalDetrHungarianMatcher( + class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost + ) + # Second: create the criterion + losses = ["labels", "boxes", "cardinality", "masks"] + criterion = ConditionalDetrLoss( + matcher=matcher, + num_classes=self.config.num_labels, + focal_alpha=self.config.focal_alpha, + losses=losses, + ) + criterion.to(self.device) + # Third: compute the losses, based on outputs and labels + outputs_loss = {} + outputs_loss["logits"] = logits + outputs_loss["pred_boxes"] = pred_boxes + outputs_loss["pred_masks"] = pred_masks + if self.config.auxiliary_loss: + intermediate = decoder_outputs.intermediate_hidden_states if return_dict else decoder_outputs[-1] + outputs_class = self.class_labels_classifier(intermediate) + outputs_coord = self.bbox_predictor(intermediate).sigmoid() + auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord) + outputs_loss["auxiliary_outputs"] = auxiliary_outputs + + loss_dict = criterion(outputs_loss, labels) + # Fourth: compute total loss, as a weighted sum of the various losses + weight_dict = {"loss_ce": 1, "loss_bbox": self.config.bbox_loss_coefficient} + weight_dict["loss_giou"] = self.config.giou_loss_coefficient + weight_dict["loss_mask"] = self.config.mask_loss_coefficient + weight_dict["loss_dice"] = self.config.dice_loss_coefficient + if self.config.auxiliary_loss: + aux_weight_dict = {} + for i in range(self.config.decoder_layers - 1): + aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) + weight_dict.update(aux_weight_dict) + loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) + + if not return_dict: + if auxiliary_outputs is not None: + output = (logits, pred_boxes, pred_masks) + auxiliary_outputs + decoder_outputs + encoder_outputs + else: + output = (logits, pred_boxes, pred_masks) + decoder_outputs + encoder_outputs + return ((loss, loss_dict) + output) if loss is not None else output + + return ConditionalDetrSegmentationOutput( + loss=loss, + loss_dict=loss_dict, + logits=logits, + pred_boxes=pred_boxes, + pred_masks=pred_masks, + auxiliary_outputs=auxiliary_outputs, + last_hidden_state=decoder_outputs.last_hidden_state, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + ) + + +def _expand(tensor, length: int): + return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1) + + +# taken from https://github.com/facebookresearch/detr/blob/master/models/segmentation.py +# Copied from transformers.models.detr.modeling_detr.DetrMaskHeadSmallConv with Detr->ConditionalDetr +class ConditionalDetrMaskHeadSmallConv(nn.Module): + """ + Simple convolutional head, using group norm. Upsampling is done using a FPN approach + """ + + def __init__(self, dim, fpn_dims, context_dim): + super().__init__() + + if dim % 8 != 0: + raise ValueError( + "The hidden_size + number of attention heads must be divisible by 8 as the number of groups in" + " GroupNorm is set to 8" + ) + + inter_dims = [dim, context_dim // 2, context_dim // 4, context_dim // 8, context_dim // 16, context_dim // 64] + + self.lay1 = nn.Conv2d(dim, dim, 3, padding=1) + self.gn1 = nn.GroupNorm(8, dim) + self.lay2 = nn.Conv2d(dim, inter_dims[1], 3, padding=1) + self.gn2 = nn.GroupNorm(8, inter_dims[1]) + self.lay3 = nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1) + self.gn3 = nn.GroupNorm(8, inter_dims[2]) + self.lay4 = nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1) + self.gn4 = nn.GroupNorm(8, inter_dims[3]) + self.lay5 = nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1) + self.gn5 = nn.GroupNorm(8, inter_dims[4]) + self.out_lay = nn.Conv2d(inter_dims[4], 1, 3, padding=1) + + self.dim = dim + + self.adapter1 = nn.Conv2d(fpn_dims[0], inter_dims[1], 1) + self.adapter2 = nn.Conv2d(fpn_dims[1], inter_dims[2], 1) + self.adapter3 = nn.Conv2d(fpn_dims[2], inter_dims[3], 1) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_uniform_(m.weight, a=1) + nn.init.constant_(m.bias, 0) + + def forward(self, x: Tensor, bbox_mask: Tensor, fpns: List[Tensor]): + # here we concatenate x, the projected feature map, of shape (batch_size, d_model, heigth/32, width/32) with + # the bbox_mask = the attention maps of shape (batch_size, n_queries, n_heads, height/32, width/32). + # We expand the projected feature map to match the number of heads. + x = torch.cat([_expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1) + + x = self.lay1(x) + x = self.gn1(x) + x = nn.functional.relu(x) + x = self.lay2(x) + x = self.gn2(x) + x = nn.functional.relu(x) + + cur_fpn = self.adapter1(fpns[0]) + if cur_fpn.size(0) != x.size(0): + cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) + x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") + x = self.lay3(x) + x = self.gn3(x) + x = nn.functional.relu(x) + + cur_fpn = self.adapter2(fpns[1]) + if cur_fpn.size(0) != x.size(0): + cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) + x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") + x = self.lay4(x) + x = self.gn4(x) + x = nn.functional.relu(x) + + cur_fpn = self.adapter3(fpns[2]) + if cur_fpn.size(0) != x.size(0): + cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) + x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") + x = self.lay5(x) + x = self.gn5(x) + x = nn.functional.relu(x) + + x = self.out_lay(x) + return x + + +# Copied from transformers.models.detr.modeling_detr.DetrMHAttentionMap with Detr->ConditionalDetr +class ConditionalDetrMHAttentionMap(nn.Module): + """This is a 2D attention module, which only returns the attention softmax (no multiplication by value)""" + + def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True, std=None): + super().__init__() + self.num_heads = num_heads + self.hidden_dim = hidden_dim + self.dropout = nn.Dropout(dropout) + + self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias) + self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias) + + self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5 + + def forward(self, q, k, mask: Optional[Tensor] = None): + q = self.q_linear(q) + k = nn.functional.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias) + queries_per_head = q.view(q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads) + keys_per_head = k.view(k.shape[0], self.num_heads, self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1]) + weights = torch.einsum("bqnc,bnchw->bqnhw", queries_per_head * self.normalize_fact, keys_per_head) + + if mask is not None: + weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), torch.finfo(weights.dtype).min) + weights = nn.functional.softmax(weights.flatten(2), dim=-1).view(weights.size()) + weights = self.dropout(weights) + return weights + + +def dice_loss(inputs, targets, num_boxes): + """ + Compute the DICE loss, similar to generalized IOU for masks + + Args: + inputs: A float tensor of arbitrary shape. + The predictions for each example. + targets: A float tensor with the same shape as inputs. Stores the binary + classification label for each element in inputs (0 for the negative class and 1 for the positive + class). + """ + inputs = inputs.sigmoid() + inputs = inputs.flatten(1) + numerator = 2 * (inputs * targets).sum(1) + denominator = inputs.sum(-1) + targets.sum(-1) + loss = 1 - (numerator + 1) / (denominator + 1) + return loss.sum() / num_boxes + + +def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2): + """ + Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. + + Args: + inputs: A float tensor of arbitrary shape. + The predictions for each example. + targets: A float tensor with the same shape as inputs. Stores the binary + classification label for each element in inputs (0 for the negative class and 1 for the positive + class). + alpha: (optional) Weighting factor in range (0,1) to balance + positive vs negative examples. Default = -1 (no weighting). + gamma: Exponent of the modulating factor (1 - p_t) to + balance easy vs hard examples. + + Returns: + Loss tensor + """ + prob = inputs.sigmoid() + ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction="none") + p_t = prob * targets + (1 - prob) * (1 - targets) + loss = ce_loss * ((1 - p_t) ** gamma) + + if alpha >= 0: + alpha_t = alpha * targets + (1 - alpha) * (1 - targets) + loss = alpha_t * loss + + return loss.mean(1).sum() / num_boxes + + +# taken from https://github.com/Atten4Vis/conditionalDETR/blob/master/models/conditional_detr.py +class ConditionalDetrLoss(nn.Module): + """ + This class computes the losses for ConditionalDetrForObjectDetection/ConditionalDetrForSegmentation. The process + happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) + we supervise each pair of matched ground-truth / prediction (supervise class and box). + + + + Args: + matcher (`ConditionalDetrHungarianMatcher`): + Module able to compute a matching between targets and proposals. + num_classes (`int`): + Number of object categories, omitting the special no-object category. + focal_alpha (`float`): + Alpha parmeter in focal loss. + losses (`List[str]`): + List of all the losses to be applied. See `get_loss` for a list of all available losses. + """ + + def __init__(self, matcher, num_classes, focal_alpha, losses): + super().__init__() + self.matcher = matcher + self.num_classes = num_classes + self.focal_alpha = focal_alpha + self.losses = losses + + # removed logging parameter, which was part of the original implementation + def loss_labels(self, outputs, targets, indices, num_boxes): + """ + Classification loss (Binary focal loss) targets dicts must contain the key "class_labels" containing a tensor + of dim [nb_target_boxes] + """ + if "logits" not in outputs: + raise KeyError("No logits were found in the outputs") + src_logits = outputs["logits"] + + idx = self._get_src_permutation_idx(indices) + target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)]) + target_classes = torch.full( + src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device + ) + target_classes[idx] = target_classes_o + + target_classes_onehot = torch.zeros( + [src_logits.shape[0], src_logits.shape[1], src_logits.shape[2] + 1], + dtype=src_logits.dtype, + layout=src_logits.layout, + device=src_logits.device, + ) + target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) + + target_classes_onehot = target_classes_onehot[:, :, :-1] + loss_ce = ( + sigmoid_focal_loss(src_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) + * src_logits.shape[1] + ) + losses = {"loss_ce": loss_ce} + + return losses + + @torch.no_grad() + def loss_cardinality(self, outputs, targets, indices, num_boxes): + """ + Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes. + + This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients. + """ + logits = outputs["logits"] + device = logits.device + tgt_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device) + # Count the number of predictions that are NOT "no-object" (which is the last class) + card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1) + card_err = nn.functional.l1_loss(card_pred.float(), tgt_lengths.float()) + losses = {"cardinality_error": card_err} + return losses + + def loss_boxes(self, outputs, targets, indices, num_boxes): + """ + Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss. + + Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes + are expected in format (center_x, center_y, w, h), normalized by the image size. + """ + if "pred_boxes" not in outputs: + raise KeyError("No predicted boxes found in outputs") + idx = self._get_src_permutation_idx(indices) + src_boxes = outputs["pred_boxes"][idx] + target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0) + + loss_bbox = nn.functional.l1_loss(src_boxes, target_boxes, reduction="none") + + losses = {} + losses["loss_bbox"] = loss_bbox.sum() / num_boxes + + loss_giou = 1 - torch.diag( + generalized_box_iou(center_to_corners_format(src_boxes), center_to_corners_format(target_boxes)) + ) + losses["loss_giou"] = loss_giou.sum() / num_boxes + return losses + + def loss_masks(self, outputs, targets, indices, num_boxes): + """ + Compute the losses related to the masks: the focal loss and the dice loss. + + Targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]. + """ + if "pred_masks" not in outputs: + raise KeyError("No predicted masks found in outputs") + + src_idx = self._get_src_permutation_idx(indices) + tgt_idx = self._get_tgt_permutation_idx(indices) + src_masks = outputs["pred_masks"] + src_masks = src_masks[src_idx] + masks = [t["masks"] for t in targets] + # TODO use valid to mask invalid areas due to padding in loss + target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() + target_masks = target_masks.to(src_masks) + target_masks = target_masks[tgt_idx] + + # upsample predictions to the target size + src_masks = nn.functional.interpolate( + src_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False + ) + src_masks = src_masks[:, 0].flatten(1) + + target_masks = target_masks.flatten(1) + target_masks = target_masks.view(src_masks.shape) + losses = { + "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes), + "loss_dice": dice_loss(src_masks, target_masks, num_boxes), + } + return losses + + def _get_src_permutation_idx(self, indices): + # permute predictions following indices + batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)]) + src_idx = torch.cat([src for (src, _) in indices]) + return batch_idx, src_idx + + def _get_tgt_permutation_idx(self, indices): + # permute targets following indices + batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]) + tgt_idx = torch.cat([tgt for (_, tgt) in indices]) + return batch_idx, tgt_idx + + def get_loss(self, loss, outputs, targets, indices, num_boxes): + loss_map = { + "labels": self.loss_labels, + "cardinality": self.loss_cardinality, + "boxes": self.loss_boxes, + "masks": self.loss_masks, + } + if loss not in loss_map: + raise ValueError(f"Loss {loss} not supported") + return loss_map[loss](outputs, targets, indices, num_boxes) + + def forward(self, outputs, targets): + """ + This performs the loss computation. + + Args: + outputs (`dict`, *optional*): + Dictionary of tensors, see the output specification of the model for the format. + targets (`List[dict]`, *optional*): + List of dicts, such that len(targets) == batch_size. The expected keys in each dict depends on the + losses applied, see each loss' doc. + """ + outputs_without_aux = {k: v for k, v in outputs.items() if k != "auxiliary_outputs"} + + # Retrieve the matching between the outputs of the last layer and the targets + indices = self.matcher(outputs_without_aux, targets) + + # Compute the average number of target boxes accross all nodes, for normalization purposes + num_boxes = sum(len(t["class_labels"]) for t in targets) + num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) + # (Niels): comment out function below, distributed training to be added + # if is_dist_avail_and_initialized(): + # torch.distributed.all_reduce(num_boxes) + # (Niels) in original implementation, num_boxes is divided by get_world_size() + num_boxes = torch.clamp(num_boxes, min=1).item() + + # Compute all the requested losses + losses = {} + for loss in self.losses: + losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) + + # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. + if "auxiliary_outputs" in outputs: + for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]): + indices = self.matcher(auxiliary_outputs, targets) + for loss in self.losses: + if loss == "masks": + # Intermediate masks losses are too costly to compute, we ignore them. + continue + l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes) + l_dict = {k + f"_{i}": v for k, v in l_dict.items()} + losses.update(l_dict) + + return losses + + +# Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead with Detr->ConditionalDetr +class ConditionalDetrMLPPredictionHead(nn.Module): + """ + Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates, + height and width of a bounding box w.r.t. an image. + + Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py + + """ + + def __init__(self, input_dim, hidden_dim, output_dim, num_layers): + super().__init__() + self.num_layers = num_layers + h = [hidden_dim] * (num_layers - 1) + self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) + + def forward(self, x): + for i, layer in enumerate(self.layers): + x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x) + return x + + +class ConditionalDetrHungarianMatcher(nn.Module): + """ + This class computes an assignment between the targets and the predictions of the network. + + For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more + predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are + un-matched (and thus treated as non-objects). + + Args: + class_cost: + The relative weight of the classification error in the matching cost. + bbox_cost: + The relative weight of the L1 error of the bounding box coordinates in the matching cost. + giou_cost: + The relative weight of the giou loss of the bounding box in the matching cost. + """ + + def __init__(self, class_cost: float = 1, bbox_cost: float = 1, giou_cost: float = 1): + super().__init__() + requires_backends(self, ["scipy"]) + + self.class_cost = class_cost + self.bbox_cost = bbox_cost + self.giou_cost = giou_cost + if class_cost == 0 and bbox_cost == 0 and giou_cost == 0: + raise ValueError("All costs of the Matcher can't be 0") + + @torch.no_grad() + def forward(self, outputs, targets): + """ + Args: + outputs (`dict`): + A dictionary that contains at least these entries: + * "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits + * "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates. + targets (`List[dict]`): + A list of targets (len(targets) = batch_size), where each target is a dict containing: + * "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of + ground-truth + objects in the target) containing the class labels + * "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates. + + Returns: + `List[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where: + - index_i is the indices of the selected predictions (in order) + - index_j is the indices of the corresponding selected targets (in order) + For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes) + """ + batch_size, num_queries = outputs["logits"].shape[:2] + + # We flatten to compute the cost matrices in a batch + out_prob = outputs["logits"].flatten(0, 1).sigmoid() # [batch_size * num_queries, num_classes] + out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] + + # Also concat the target labels and boxes + tgt_ids = torch.cat([v["class_labels"] for v in targets]) + tgt_bbox = torch.cat([v["boxes"] for v in targets]) + + # Compute the classification cost. + alpha = 0.25 + gamma = 2.0 + neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log()) + pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log()) + class_cost = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids] + + # Compute the L1 cost between boxes + bbox_cost = torch.cdist(out_bbox, tgt_bbox, p=1) + + # Compute the giou cost between boxes + giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(tgt_bbox)) + + # Final cost matrix + cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost + cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu() + + sizes = [len(v["boxes"]) for v in targets] + indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))] + return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices] + + +# below: bounding box utilities taken from https://github.com/facebookresearch/detr/blob/master/util/box_ops.py + + +def _upcast(t: Tensor) -> Tensor: + # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type + if t.is_floating_point(): + return t if t.dtype in (torch.float32, torch.float64) else t.float() + else: + return t if t.dtype in (torch.int32, torch.int64) else t.int() + + +# Copied from transformers.models.detr.modeling_detr.box_area +def box_area(boxes: Tensor) -> Tensor: + """ + Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates. + + Args: + boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`): + Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1 + < x2` and `0 <= y1 < y2`. + + Returns: + `torch.FloatTensor`: a tensor containing the area for each box. + """ + boxes = _upcast(boxes) + return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) + + +# Copied from transformers.models.detr.modeling_detr.box_iou +def box_iou(boxes1, boxes2): + area1 = box_area(boxes1) + area2 = box_area(boxes2) + + left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] + right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] + + width_height = (right_bottom - left_top).clamp(min=0) # [N,M,2] + inter = width_height[:, :, 0] * width_height[:, :, 1] # [N,M] + + union = area1[:, None] + area2 - inter + + iou = inter / union + return iou, union + + +# Copied from transformers.models.detr.modeling_detr.generalized_box_iou +def generalized_box_iou(boxes1, boxes2): + """ + Generalized IoU from https://giou.stanford.edu/. The boxes should be in [x0, y0, x1, y1] (corner) format. + + Returns: + `torch.FloatTensor`: a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2) + """ + # degenerate boxes gives inf / nan results + # so do an early check + if not (boxes1[:, 2:] >= boxes1[:, :2]).all(): + raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}") + if not (boxes2[:, 2:] >= boxes2[:, :2]).all(): + raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}") + iou, union = box_iou(boxes1, boxes2) + + top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2]) + bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) + + width_height = (bottom_right - top_left).clamp(min=0) # [N,M,2] + area = width_height[:, :, 0] * width_height[:, :, 1] + + return iou - (area - union) / area + + +# below: taken from https://github.com/facebookresearch/detr/blob/master/util/misc.py#L306 + + +def _max_by_axis(the_list): + # type: (List[List[int]]) -> List[int] + maxes = the_list[0] + for sublist in the_list[1:]: + for index, item in enumerate(sublist): + maxes[index] = max(maxes[index], item) + return maxes + + +# Copied from transformers.models.detr.modeling_detr.NestedTensor +class NestedTensor(object): + def __init__(self, tensors, mask: Optional[Tensor]): + self.tensors = tensors + self.mask = mask + + def to(self, device): + cast_tensor = self.tensors.to(device) + mask = self.mask + if mask is not None: + cast_mask = mask.to(device) + else: + cast_mask = None + return NestedTensor(cast_tensor, cast_mask) + + def decompose(self): + return self.tensors, self.mask + + def __repr__(self): + return str(self.tensors) + + +# Copied from transformers.models.detr.modeling_detr.nested_tensor_from_tensor_list +def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): + if tensor_list[0].ndim == 3: + max_size = _max_by_axis([list(img.shape) for img in tensor_list]) + batch_shape = [len(tensor_list)] + max_size + batch_size, num_channels, height, width = batch_shape + dtype = tensor_list[0].dtype + device = tensor_list[0].device + tensor = torch.zeros(batch_shape, dtype=dtype, device=device) + mask = torch.ones((batch_size, height, width), dtype=torch.bool, device=device) + for img, pad_img, m in zip(tensor_list, tensor, mask): + pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) + m[: img.shape[1], : img.shape[2]] = False + else: + raise ValueError("Only 3-dimensional tensors are supported") + return NestedTensor(tensor, mask) diff --git a/src/transformers/utils/dummy_timm_and_vision_objects.py b/src/transformers/utils/dummy_timm_and_vision_objects.py index e990c33d2d317a..0696a17d3778ae 100644 --- a/src/transformers/utils/dummy_timm_and_vision_objects.py +++ b/src/transformers/utils/dummy_timm_and_vision_objects.py @@ -3,6 +3,37 @@ from ..utils import DummyObject, requires_backends +CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ConditionalDetrForObjectDetection(metaclass=DummyObject): + _backends = ["timm", "vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["timm", "vision"]) + + +class ConditionalDetrForSegmentation(metaclass=DummyObject): + _backends = ["timm", "vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["timm", "vision"]) + + +class ConditionalDetrModel(metaclass=DummyObject): + _backends = ["timm", "vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["timm", "vision"]) + + +class ConditionalDetrPreTrainedModel(metaclass=DummyObject): + _backends = ["timm", "vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["timm", "vision"]) + + DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/src/transformers/utils/dummy_vision_objects.py b/src/transformers/utils/dummy_vision_objects.py index e1f4f3b1fd9fa1..d2ec5be33ceb8c 100644 --- a/src/transformers/utils/dummy_vision_objects.py +++ b/src/transformers/utils/dummy_vision_objects.py @@ -24,6 +24,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) +class ConditionalDetrFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + class ConvNextFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] diff --git a/tests/models/conditional_detr/__init__.py b/tests/models/conditional_detr/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/tests/models/conditional_detr/test_feature_extraction_conditional_detr.py b/tests/models/conditional_detr/test_feature_extraction_conditional_detr.py new file mode 100644 index 00000000000000..ddf7e66ef72fe6 --- /dev/null +++ b/tests/models/conditional_detr/test_feature_extraction_conditional_detr.py @@ -0,0 +1,342 @@ +# coding=utf-8 +# Copyright 2022 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import json +import pathlib +import unittest + +import numpy as np + +from transformers.testing_utils import require_torch, require_vision, slow +from transformers.utils import is_torch_available, is_vision_available + +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin, prepare_image_inputs + + +if is_torch_available(): + import torch + +if is_vision_available(): + from PIL import Image + + from transformers import ConditionalDetrFeatureExtractor + + +class ConditionalDetrFeatureExtractionTester(unittest.TestCase): + def __init__( + self, + parent, + batch_size=7, + num_channels=3, + min_resolution=30, + max_resolution=400, + do_resize=True, + size=18, + max_size=1333, # by setting max_size > max_resolution we're effectively not testing this :p + do_normalize=True, + image_mean=[0.5, 0.5, 0.5], + image_std=[0.5, 0.5, 0.5], + ): + self.parent = parent + self.batch_size = batch_size + self.num_channels = num_channels + self.min_resolution = min_resolution + self.max_resolution = max_resolution + self.do_resize = do_resize + self.size = size + self.max_size = max_size + self.do_normalize = do_normalize + self.image_mean = image_mean + self.image_std = image_std + + def prepare_feat_extract_dict(self): + return { + "do_resize": self.do_resize, + "size": self.size, + "max_size": self.max_size, + "do_normalize": self.do_normalize, + "image_mean": self.image_mean, + "image_std": self.image_std, + } + + def get_expected_values(self, image_inputs, batched=False): + """ + This function computes the expected height and width when providing images to ConditionalDetrFeatureExtractor, + assuming do_resize is set to True with a scalar size. + """ + if not batched: + image = image_inputs[0] + if isinstance(image, Image.Image): + w, h = image.size + else: + h, w = image.shape[1], image.shape[2] + if w < h: + expected_height = int(self.size * h / w) + expected_width = self.size + elif w > h: + expected_height = self.size + expected_width = int(self.size * w / h) + else: + expected_height = self.size + expected_width = self.size + + else: + expected_values = [] + for image in image_inputs: + expected_height, expected_width = self.get_expected_values([image]) + expected_values.append((expected_height, expected_width)) + expected_height = max(expected_values, key=lambda item: item[0])[0] + expected_width = max(expected_values, key=lambda item: item[1])[1] + + return expected_height, expected_width + + +@require_torch +@require_vision +class ConditionalDetrFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): + + feature_extraction_class = ConditionalDetrFeatureExtractor if is_vision_available() else None + + def setUp(self): + self.feature_extract_tester = ConditionalDetrFeatureExtractionTester(self) + + @property + def feat_extract_dict(self): + return self.feature_extract_tester.prepare_feat_extract_dict() + + def test_feat_extract_properties(self): + feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + self.assertTrue(hasattr(feature_extractor, "image_mean")) + self.assertTrue(hasattr(feature_extractor, "image_std")) + self.assertTrue(hasattr(feature_extractor, "do_normalize")) + self.assertTrue(hasattr(feature_extractor, "do_resize")) + self.assertTrue(hasattr(feature_extractor, "size")) + self.assertTrue(hasattr(feature_extractor, "max_size")) + + def test_batch_feature(self): + pass + + def test_call_pil(self): + # Initialize feature_extractor + feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # create random PIL images + image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + for image in image_inputs: + self.assertIsInstance(image, Image.Image) + + # Test not batched input + encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + + expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs) + + self.assertEqual( + encoded_images.shape, + (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + ) + + # Test batched + expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True) + + encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + self.assertEqual( + encoded_images.shape, + ( + self.feature_extract_tester.batch_size, + self.feature_extract_tester.num_channels, + expected_height, + expected_width, + ), + ) + + def test_call_numpy(self): + # Initialize feature_extractor + feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # create random numpy tensors + image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + for image in image_inputs: + self.assertIsInstance(image, np.ndarray) + + # Test not batched input + encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + + expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs) + + self.assertEqual( + encoded_images.shape, + (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + ) + + # Test batched + encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + + expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True) + + self.assertEqual( + encoded_images.shape, + ( + self.feature_extract_tester.batch_size, + self.feature_extract_tester.num_channels, + expected_height, + expected_width, + ), + ) + + def test_call_pytorch(self): + # Initialize feature_extractor + feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # create random PyTorch tensors + image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + for image in image_inputs: + self.assertIsInstance(image, torch.Tensor) + + # Test not batched input + encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + + expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs) + + self.assertEqual( + encoded_images.shape, + (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + ) + + # Test batched + encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + + expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True) + + self.assertEqual( + encoded_images.shape, + ( + self.feature_extract_tester.batch_size, + self.feature_extract_tester.num_channels, + expected_height, + expected_width, + ), + ) + + def test_equivalence_pad_and_create_pixel_mask(self): + # Initialize feature_extractors + feature_extractor_1 = self.feature_extraction_class(**self.feat_extract_dict) + feature_extractor_2 = self.feature_extraction_class(do_resize=False, do_normalize=False) + # create random PyTorch tensors + image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + for image in image_inputs: + self.assertIsInstance(image, torch.Tensor) + + # Test whether the method "pad_and_return_pixel_mask" and calling the feature extractor return the same tensors + encoded_images_with_method = feature_extractor_1.pad_and_create_pixel_mask(image_inputs, return_tensors="pt") + encoded_images = feature_extractor_2(image_inputs, return_tensors="pt") + + self.assertTrue( + torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4) + ) + self.assertTrue( + torch.allclose(encoded_images_with_method["pixel_mask"], encoded_images["pixel_mask"], atol=1e-4) + ) + + @slow + def test_call_pytorch_with_coco_detection_annotations(self): + # prepare image and target + image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") + with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f: + target = json.loads(f.read()) + + target = {"image_id": 39769, "annotations": target} + + # encode them + feature_extractor = ConditionalDetrFeatureExtractor.from_pretrained("microsoft/conditional-detr-resnet-50") + encoding = feature_extractor(images=image, annotations=target, return_tensors="pt") + + # verify pixel values + expected_shape = torch.Size([1, 3, 800, 1066]) + self.assertEqual(encoding["pixel_values"].shape, expected_shape) + + expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) + self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)) + + # verify area + expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) + self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area)) + # verify boxes + expected_boxes_shape = torch.Size([6, 4]) + self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) + expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) + self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)) + # verify image_id + expected_image_id = torch.tensor([39769]) + self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)) + # verify is_crowd + expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) + self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)) + # verify class_labels + expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17]) + self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)) + # verify orig_size + expected_orig_size = torch.tensor([480, 640]) + self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)) + # verify size + expected_size = torch.tensor([800, 1066]) + self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size)) + + @slow + def test_call_pytorch_with_coco_panoptic_annotations(self): + # prepare image, target and masks_path + image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") + with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f: + target = json.loads(f.read()) + + target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} + + masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic") + + # encode them + # TODO replace by .from_pretrained microsoft/conditional-detr-resnet-50-panoptic + feature_extractor = ConditionalDetrFeatureExtractor(format="coco_panoptic") + encoding = feature_extractor(images=image, annotations=target, masks_path=masks_path, return_tensors="pt") + + # verify pixel values + expected_shape = torch.Size([1, 3, 800, 1066]) + self.assertEqual(encoding["pixel_values"].shape, expected_shape) + + expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) + self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)) + + # verify area + expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) + self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area)) + # verify boxes + expected_boxes_shape = torch.Size([6, 4]) + self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) + expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) + self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)) + # verify image_id + expected_image_id = torch.tensor([39769]) + self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)) + # verify is_crowd + expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) + self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)) + # verify class_labels + expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93]) + self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)) + # verify masks + expected_masks_sum = 822338 + self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum) + # verify orig_size + expected_orig_size = torch.tensor([480, 640]) + self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)) + # verify size + expected_size = torch.tensor([800, 1066]) + self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size)) diff --git a/tests/models/conditional_detr/test_modeling_conditional_detr.py b/tests/models/conditional_detr/test_modeling_conditional_detr.py new file mode 100644 index 00000000000000..2c7be4d8f2b56a --- /dev/null +++ b/tests/models/conditional_detr/test_modeling_conditional_detr.py @@ -0,0 +1,542 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Testing suite for the PyTorch CONDITIONAL_DETR model. """ + + +import inspect +import math +import unittest + +from transformers import ConditionalDetrConfig, is_timm_available, is_vision_available +from transformers.testing_utils import require_timm, require_vision, slow, torch_device +from transformers.utils import cached_property + +from ...generation.test_generation_utils import GenerationTesterMixin +from ...test_configuration_common import ConfigTester +from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor + + +if is_timm_available(): + import torch + + from transformers import ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrModel + + +if is_vision_available(): + from PIL import Image + + from transformers import ConditionalDetrFeatureExtractor + + +class ConditionalDetrModelTester: + def __init__( + self, + parent, + batch_size=8, + is_training=True, + use_labels=True, + hidden_size=256, + num_hidden_layers=2, + num_attention_heads=8, + intermediate_size=4, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + num_queries=12, + num_channels=3, + min_size=200, + max_size=200, + n_targets=8, + num_labels=91, + ): + self.parent = parent + self.batch_size = batch_size + self.is_training = is_training + self.use_labels = use_labels + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.num_queries = num_queries + self.num_channels = num_channels + self.min_size = min_size + self.max_size = max_size + self.n_targets = n_targets + self.num_labels = num_labels + + # we also set the expected seq length for both encoder and decoder + self.encoder_seq_length = math.ceil(self.min_size / 32) * math.ceil(self.max_size / 32) + self.decoder_seq_length = self.num_queries + + def prepare_config_and_inputs(self): + pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]) + + pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device) + + labels = None + if self.use_labels: + # labels is a list of Dict (each Dict being the labels for a given example in the batch) + labels = [] + for i in range(self.batch_size): + target = {} + target["class_labels"] = torch.randint( + high=self.num_labels, size=(self.n_targets,), device=torch_device + ) + target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device) + target["masks"] = torch.rand(self.n_targets, self.min_size, self.max_size, device=torch_device) + labels.append(target) + + config = self.get_config() + return config, pixel_values, pixel_mask, labels + + def get_config(self): + return ConditionalDetrConfig( + d_model=self.hidden_size, + encoder_layers=self.num_hidden_layers, + decoder_layers=self.num_hidden_layers, + encoder_attention_heads=self.num_attention_heads, + decoder_attention_heads=self.num_attention_heads, + encoder_ffn_dim=self.intermediate_size, + decoder_ffn_dim=self.intermediate_size, + dropout=self.hidden_dropout_prob, + attention_dropout=self.attention_probs_dropout_prob, + num_queries=self.num_queries, + num_labels=self.num_labels, + ) + + def prepare_config_and_inputs_for_common(self): + config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs() + inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} + return config, inputs_dict + + def create_and_check_conditional_detr_model(self, config, pixel_values, pixel_mask, labels): + model = ConditionalDetrModel(config=config) + model.to(torch_device) + model.eval() + + result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) + result = model(pixel_values) + + self.parent.assertEqual( + result.last_hidden_state.shape, (self.batch_size, self.decoder_seq_length, self.hidden_size) + ) + + def create_and_check_conditional_detr_object_detection_head_model(self, config, pixel_values, pixel_mask, labels): + model = ConditionalDetrForObjectDetection(config=config) + model.to(torch_device) + model.eval() + + result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) + result = model(pixel_values) + + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) + self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) + + result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels) + + self.parent.assertEqual(result.loss.shape, ()) + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) + self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) + + +@require_timm +class ConditionalDetrModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): + all_model_classes = ( + ( + ConditionalDetrModel, + ConditionalDetrForObjectDetection, + ConditionalDetrForSegmentation, + ) + if is_timm_available() + else () + ) + is_encoder_decoder = True + test_torchscript = False + test_pruning = False + test_head_masking = False + test_missing_keys = False + + # special case for head models + def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): + inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) + + if return_labels: + if model_class.__name__ in ["ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation"]: + labels = [] + for i in range(self.model_tester.batch_size): + target = {} + target["class_labels"] = torch.ones( + size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long + ) + target["boxes"] = torch.ones( + self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float + ) + target["masks"] = torch.ones( + self.model_tester.n_targets, + self.model_tester.min_size, + self.model_tester.max_size, + device=torch_device, + dtype=torch.float, + ) + labels.append(target) + inputs_dict["labels"] = labels + + return inputs_dict + + def setUp(self): + self.model_tester = ConditionalDetrModelTester(self) + self.config_tester = ConfigTester(self, config_class=ConditionalDetrConfig, has_text_modality=False) + + def test_config(self): + self.config_tester.run_common_tests() + + def test_conditional_detr_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_conditional_detr_model(*config_and_inputs) + + def test_conditional_detr_object_detection_head_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_conditional_detr_object_detection_head_model(*config_and_inputs) + + @unittest.skip(reason="CONDITIONAL_DETR does not use inputs_embeds") + def test_inputs_embeds(self): + pass + + @unittest.skip(reason="CONDITIONAL_DETR does not have a get_input_embeddings method") + def test_model_common_attributes(self): + pass + + @unittest.skip(reason="CONDITIONAL_DETR is not a generative model") + def test_generate_without_input_ids(self): + pass + + @unittest.skip(reason="CONDITIONAL_DETR does not use token embeddings") + def test_resize_tokens_embeddings(self): + pass + + @slow + def test_model_outputs_equivalence(self): + # TODO Niels: fix me! + pass + + def test_attention_outputs(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + config.return_dict = True + + decoder_seq_length = self.model_tester.decoder_seq_length + encoder_seq_length = self.model_tester.encoder_seq_length + decoder_key_length = self.model_tester.decoder_seq_length + encoder_key_length = self.model_tester.encoder_seq_length + + for model_class in self.all_model_classes: + inputs_dict["output_attentions"] = True + inputs_dict["output_hidden_states"] = False + config.return_dict = True + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions + self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) + + # check that output_attentions also work using config + del inputs_dict["output_attentions"] + config.output_attentions = True + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions + self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) + + self.assertListEqual( + list(attentions[0].shape[-3:]), + [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], + ) + out_len = len(outputs) + + if self.is_encoder_decoder: + correct_outlen = 6 + + # loss is at first position + if "labels" in inputs_dict: + correct_outlen += 1 # loss is added to beginning + # Object Detection model returns pred_logits and pred_boxes + if model_class.__name__ == "ConditionalDetrForObjectDetection": + correct_outlen += 1 + # Panoptic Segmentation model returns pred_logits, pred_boxes, pred_masks + if model_class.__name__ == "ConditionalDetrForSegmentation": + correct_outlen += 2 + if "past_key_values" in outputs: + correct_outlen += 1 # past_key_values have been returned + + self.assertEqual(out_len, correct_outlen) + + # decoder attentions + decoder_attentions = outputs.decoder_attentions + self.assertIsInstance(decoder_attentions, (list, tuple)) + self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) + self.assertListEqual( + list(decoder_attentions[0].shape[-3:]), + [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], + ) + + # cross attentions + cross_attentions = outputs.cross_attentions + self.assertIsInstance(cross_attentions, (list, tuple)) + self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) + self.assertListEqual( + list(cross_attentions[0].shape[-3:]), + [ + self.model_tester.num_attention_heads, + decoder_seq_length, + encoder_key_length, + ], + ) + + # Check attention is always last and order is fine + inputs_dict["output_attentions"] = True + inputs_dict["output_hidden_states"] = True + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + if hasattr(self.model_tester, "num_hidden_states_types"): + added_hidden_states = self.model_tester.num_hidden_states_types + elif self.is_encoder_decoder: + added_hidden_states = 2 + else: + added_hidden_states = 1 + self.assertEqual(out_len + added_hidden_states, len(outputs)) + + self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions + + self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) + self.assertListEqual( + list(self_attentions[0].shape[-3:]), + [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], + ) + + def test_retain_grad_hidden_states_attentions(self): + # removed retain_grad and grad on decoder_hidden_states, as queries don't require grad + + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + config.output_hidden_states = True + config.output_attentions = True + + # no need to test all models as different heads yield the same functionality + model_class = self.all_model_classes[0] + model = model_class(config) + model.to(torch_device) + + inputs = self._prepare_for_class(inputs_dict, model_class) + + outputs = model(**inputs) + + output = outputs[0] + + encoder_hidden_states = outputs.encoder_hidden_states[0] + encoder_attentions = outputs.encoder_attentions[0] + encoder_hidden_states.retain_grad() + encoder_attentions.retain_grad() + + decoder_attentions = outputs.decoder_attentions[0] + decoder_attentions.retain_grad() + + cross_attentions = outputs.cross_attentions[0] + cross_attentions.retain_grad() + + output.flatten()[0].backward(retain_graph=True) + + self.assertIsNotNone(encoder_hidden_states.grad) + self.assertIsNotNone(encoder_attentions.grad) + self.assertIsNotNone(decoder_attentions.grad) + self.assertIsNotNone(cross_attentions.grad) + + def test_forward_signature(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + signature = inspect.signature(model.forward) + # signature.parameters is an OrderedDict => so arg_names order is deterministic + arg_names = [*signature.parameters.keys()] + + if model.config.is_encoder_decoder: + expected_arg_names = ["pixel_values", "pixel_mask"] + expected_arg_names.extend( + ["head_mask", "decoder_head_mask", "encoder_outputs"] + if "head_mask" and "decoder_head_mask" in arg_names + else [] + ) + self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) + else: + expected_arg_names = ["pixel_values", "pixel_mask"] + self.assertListEqual(arg_names[:1], expected_arg_names) + + def test_different_timm_backbone(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + # let's pick a random timm backbone + config.backbone = "tf_mobilenetv3_small_075" + + for model_class in self.all_model_classes: + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + if model_class.__name__ == "ConditionalDetrForObjectDetection": + expected_shape = ( + self.model_tester.batch_size, + self.model_tester.num_queries, + self.model_tester.num_labels, + ) + self.assertEqual(outputs.logits.shape, expected_shape) + + self.assertTrue(outputs) + + def test_initialization(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + configs_no_init = _config_zero_init(config) + configs_no_init.init_xavier_std = 1e9 + + for model_class in self.all_model_classes: + model = model_class(config=configs_no_init) + for name, param in model.named_parameters(): + if param.requires_grad: + if "bbox_attention" in name and "bias" not in name: + self.assertLess( + 100000, + abs(param.data.max().item()), + msg=f"Parameter {name} of model {model_class} seems not properly initialized", + ) + else: + self.assertIn( + ((param.data.mean() * 1e9).round() / 1e9).item(), + [0.0, 1.0], + msg=f"Parameter {name} of model {model_class} seems not properly initialized", + ) + + +TOLERANCE = 1e-4 + + +# We will verify our results on an image of cute cats +def prepare_img(): + image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") + return image + + +@require_timm +@require_vision +@slow +class ConditionalDetrModelIntegrationTests(unittest.TestCase): + @cached_property + def default_feature_extractor(self): + return ( + ConditionalDetrFeatureExtractor.from_pretrained("microsoft/conditional-detr-resnet-50") + if is_vision_available() + else None + ) + + def test_inference_no_head(self): + model = ConditionalDetrModel.from_pretrained("microsoft/conditional-detr-resnet-50").to(torch_device) + + feature_extractor = self.default_feature_extractor + image = prepare_img() + encoding = feature_extractor(images=image, return_tensors="pt").to(torch_device) + + with torch.no_grad(): + outputs = model(**encoding) + + expected_shape = torch.Size((1, 300, 256)) + self.assertEqual(outputs.last_hidden_state.shape, expected_shape) + expected_slice = torch.tensor( + [[0.0616, -0.5146, -0.4032], [-0.7629, -0.4934, -1.7153], [-0.4768, -0.6403, -0.7826]] + ).to(torch_device) + self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)) + + def test_inference_object_detection_head(self): + model = ConditionalDetrForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50").to( + torch_device + ) + + feature_extractor = self.default_feature_extractor + image = prepare_img() + encoding = feature_extractor(images=image, return_tensors="pt").to(torch_device) + pixel_values = encoding["pixel_values"].to(torch_device) + pixel_mask = encoding["pixel_mask"].to(torch_device) + + with torch.no_grad(): + outputs = model(pixel_values, pixel_mask) + + expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels)) + self.assertEqual(outputs.logits.shape, expected_shape_logits) + expected_slice_logits = torch.tensor( + [[-19.1194, -0.0893, -11.0154], [-17.3640, -1.8035, -14.0219], [-20.0461, -0.5837, -11.1060]] + ).to(torch_device) + self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4)) + + expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) + self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) + expected_slice_boxes = torch.tensor( + [[0.4433, 0.5302, 0.8853], [0.5494, 0.2517, 0.0529], [0.4998, 0.5360, 0.9956]] + ).to(torch_device) + self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)) + + def test_inference_panoptic_segmentation_head(self): + model = ConditionalDetrForSegmentation.from_pretrained("microsoft/conditional-detr-resnet-50-panoptic").to( + torch_device + ) + + feature_extractor = self.default_feature_extractor + image = prepare_img() + encoding = feature_extractor(images=image, return_tensors="pt").to(torch_device) + pixel_values = encoding["pixel_values"].to(torch_device) + pixel_mask = encoding["pixel_mask"].to(torch_device) + + with torch.no_grad(): + outputs = model(pixel_values, pixel_mask) + + expected_shape_logits = torch.Size((1, model.config.num_queries, model.config.num_labels)) + self.assertEqual(outputs.logits.shape, expected_shape_logits) + expected_slice_logits = torch.tensor( + [[-18.1565, -1.7568, -13.5029], [-16.8888, -1.4138, -14.1028], [-17.5709, -2.5080, -11.8654]] + ).to(torch_device) + self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4)) + + expected_shape_boxes = torch.Size((1, model.config.num_queries, 4)) + self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) + expected_slice_boxes = torch.tensor( + [[0.5344, 0.1789, 0.9285], [0.4420, 0.0572, 0.0875], [0.6630, 0.6887, 0.1017]] + ).to(torch_device) + self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)) + + expected_shape_masks = torch.Size((1, model.config.num_queries, 200, 267)) + self.assertEqual(outputs.pred_masks.shape, expected_shape_masks) + expected_slice_masks = torch.tensor( + [[-7.7558, -10.8788, -11.9797], [-11.8881, -16.4329, -17.7451], [-14.7316, -19.7383, -20.3004]] + ).to(torch_device) + self.assertTrue(torch.allclose(outputs.pred_masks[0, 0, :3, :3], expected_slice_masks, atol=1e-3)) diff --git a/utils/check_repo.py b/utils/check_repo.py index ea3b997f1f1261..988967e797d12e 100644 --- a/utils/check_repo.py +++ b/utils/check_repo.py @@ -60,6 +60,8 @@ "DetrEncoder", # Building part of bigger (tested) model. "DetrDecoder", # Building part of bigger (tested) model. "DetrDecoderWrapper", # Building part of bigger (tested) model. + "ConditionalDetrEncoder", # Building part of bigger (tested) model. + "ConditionalDetrDecoder", # Building part of bigger (tested) model. "M2M100Encoder", # Building part of bigger (tested) model. "M2M100Decoder", # Building part of bigger (tested) model. "MCTCTEncoder", # Building part of bigger (tested) model. @@ -165,6 +167,7 @@ "FlaxCLIPVisionModel", "FlaxWav2Vec2ForCTC", "DetrForSegmentation", + "ConditionalDetrForSegmentation", "DPRReader", "FlaubertForQuestionAnswering", "FlavaImageCodebook", From 9393f966bcebd112b25f84e46af6e349525406c7 Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Thu, 22 Sep 2022 09:45:24 +0200 Subject: [PATCH 362/539] [fix] Add DeformableDetrFeatureExtractor (#19140) * Add DeformableDetrFeatureExtractor * Fix post_process * Fix name * Add tests for feature extractor * Fix doc tests * Fix name * Address comments * Apply same fix to DETR and YOLOS as well Co-authored-by: Niels Rogge --- docs/source/en/model_doc/deformable_detr.mdx | 10 + src/transformers/__init__.py | 2 + .../models/auto/feature_extraction_auto.py | 2 +- .../models/deformable_detr/__init__.py | 18 +- .../convert_deformable_detr_to_pytorch.py | 4 +- .../feature_extraction_deformable_detr.py | 946 ++++++++++++++++++ .../modeling_deformable_detr.py | 10 +- .../models/detr/feature_extraction_detr.py | 8 +- .../models/yolos/feature_extraction_yolos.py | 6 +- .../utils/dummy_vision_objects.py | 7 + ...test_feature_extraction_deformable_detr.py | 341 +++++++ .../detr/test_feature_extraction_detr.py | 43 +- .../yolos/test_feature_extraction_yolos.py | 36 +- 13 files changed, 1380 insertions(+), 53 deletions(-) create mode 100644 src/transformers/models/deformable_detr/feature_extraction_deformable_detr.py create mode 100644 tests/models/deformable_detr/test_feature_extraction_deformable_detr.py diff --git a/docs/source/en/model_doc/deformable_detr.mdx b/docs/source/en/model_doc/deformable_detr.mdx index 7997b2f19d2e82..08cbbb0aa02189 100644 --- a/docs/source/en/model_doc/deformable_detr.mdx +++ b/docs/source/en/model_doc/deformable_detr.mdx @@ -33,6 +33,16 @@ alt="drawing" width="600"/> This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/fundamentalvision/Deformable-DETR). +## DeformableDetrFeatureExtractor + +[[autodoc]] DeformableDetrFeatureExtractor + - __call__ + - pad_and_create_pixel_mask + - post_process + - post_process_segmentation + - post_process_panoptic + + ## DeformableDetrConfig [[autodoc]] DeformableDetrConfig diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 6abc53c85008e3..123c56bdd53267 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -659,6 +659,7 @@ _import_structure["models.beit"].append("BeitFeatureExtractor") _import_structure["models.clip"].append("CLIPFeatureExtractor") _import_structure["models.convnext"].append("ConvNextFeatureExtractor") + _import_structure["models.deformable_detr"].append("DeformableDetrFeatureExtractor") _import_structure["models.deit"].append("DeiTFeatureExtractor") _import_structure["models.detr"].append("DetrFeatureExtractor") _import_structure["models.conditional_detr"].append("ConditionalDetrFeatureExtractor") @@ -3512,6 +3513,7 @@ from .models.clip import CLIPFeatureExtractor from .models.conditional_detr import ConditionalDetrFeatureExtractor from .models.convnext import ConvNextFeatureExtractor + from .models.deformable_detr import DeformableDetrFeatureExtractor from .models.deit import DeiTFeatureExtractor from .models.detr import DetrFeatureExtractor from .models.donut import DonutFeatureExtractor diff --git a/src/transformers/models/auto/feature_extraction_auto.py b/src/transformers/models/auto/feature_extraction_auto.py index cb75f439c233d3..3f2265875f2a69 100644 --- a/src/transformers/models/auto/feature_extraction_auto.py +++ b/src/transformers/models/auto/feature_extraction_auto.py @@ -44,7 +44,7 @@ ("cvt", "ConvNextFeatureExtractor"), ("data2vec-audio", "Wav2Vec2FeatureExtractor"), ("data2vec-vision", "BeitFeatureExtractor"), - ("deformable_detr", "DetrFeatureExtractor"), + ("deformable_detr", "DeformableDetrFeatureExtractor"), ("deit", "DeiTFeatureExtractor"), ("detr", "DetrFeatureExtractor"), ("donut", "DonutFeatureExtractor"), diff --git a/src/transformers/models/deformable_detr/__init__.py b/src/transformers/models/deformable_detr/__init__.py index f70d937c7ff468..36c1a83b0eee4e 100644 --- a/src/transformers/models/deformable_detr/__init__.py +++ b/src/transformers/models/deformable_detr/__init__.py @@ -18,13 +18,21 @@ from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_timm_available +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_timm_available, is_vision_available _import_structure = { "configuration_deformable_detr": ["DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeformableDetrConfig"], } +try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["feature_extraction_deformable_detr"] = ["DeformableDetrFeatureExtractor"] + try: if not is_timm_available(): raise OptionalDependencyNotAvailable() @@ -42,6 +50,14 @@ if TYPE_CHECKING: from .configuration_deformable_detr import DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP, DeformableDetrConfig + try: + if not is_vision_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .feature_extraction_deformable_detr import DeformableDetrFeatureExtractor + try: if not is_timm_available(): raise OptionalDependencyNotAvailable() diff --git a/src/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py b/src/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py index 85ee723fb33968..30726c5e9744c6 100644 --- a/src/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py +++ b/src/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py @@ -24,7 +24,7 @@ import requests from huggingface_hub import cached_download, hf_hub_url -from transformers import DeformableDetrConfig, DeformableDetrForObjectDetection, DetrFeatureExtractor +from transformers import DeformableDetrConfig, DeformableDetrFeatureExtractor, DeformableDetrForObjectDetection from transformers.utils import logging @@ -116,7 +116,7 @@ def convert_deformable_detr_checkpoint( config.label2id = {v: k for k, v in id2label.items()} # load feature extractor - feature_extractor = DetrFeatureExtractor(format="coco_detection") + feature_extractor = DeformableDetrFeatureExtractor(format="coco_detection") # prepare image img = prepare_img() diff --git a/src/transformers/models/deformable_detr/feature_extraction_deformable_detr.py b/src/transformers/models/deformable_detr/feature_extraction_deformable_detr.py new file mode 100644 index 00000000000000..415df84fd196b8 --- /dev/null +++ b/src/transformers/models/deformable_detr/feature_extraction_deformable_detr.py @@ -0,0 +1,946 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Feature extractor class for Deformable DETR.""" + +import io +import pathlib +from collections import defaultdict +from typing import Dict, List, Optional, Union + +import numpy as np +from PIL import Image + +from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin +from ...image_utils import ImageFeatureExtractionMixin, is_torch_tensor +from ...utils import TensorType, is_torch_available, logging + + +if is_torch_available(): + import torch + from torch import nn + +logger = logging.get_logger(__name__) + + +ImageInput = Union[Image.Image, np.ndarray, "torch.Tensor", List[Image.Image], List[np.ndarray], List["torch.Tensor"]] + + +# Copied from transformers.models.detr.feature_extraction_detr.center_to_corners_format +def center_to_corners_format(x): + """ + Converts a PyTorch tensor of bounding boxes of center format (center_x, center_y, width, height) to corners format + (x_0, y_0, x_1, y_1). + """ + x_c, y_c, w, h = x.unbind(-1) + b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)] + return torch.stack(b, dim=-1) + + +# Copied from transformers.models.detr.feature_extraction_detr.corners_to_center_format +def corners_to_center_format(x): + """ + Converts a NumPy array of bounding boxes of shape (number of bounding boxes, 4) of corners format (x_0, y_0, x_1, + y_1) to center format (center_x, center_y, width, height). + """ + x_transposed = x.T + x0, y0, x1, y1 = x_transposed[0], x_transposed[1], x_transposed[2], x_transposed[3] + b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)] + return np.stack(b, axis=-1) + + +# Copied from transformers.models.detr.feature_extraction_detr.masks_to_boxes +def masks_to_boxes(masks): + """ + Compute the bounding boxes around the provided panoptic segmentation masks. + + The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. + + Returns a [N, 4] tensor, with the boxes in corner (xyxy) format. + """ + if masks.size == 0: + return np.zeros((0, 4)) + + h, w = masks.shape[-2:] + + y = np.arange(0, h, dtype=np.float32) + x = np.arange(0, w, dtype=np.float32) + # see https://github.com/pytorch/pytorch/issues/50276 + y, x = np.meshgrid(y, x, indexing="ij") + + x_mask = masks * np.expand_dims(x, axis=0) + x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) + x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) + x_min = x.filled(fill_value=1e8) + x_min = x_min.reshape(x_min.shape[0], -1).min(-1) + + y_mask = masks * np.expand_dims(y, axis=0) + y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) + y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) + y_min = y.filled(fill_value=1e8) + y_min = y_min.reshape(y_min.shape[0], -1).min(-1) + + return np.stack([x_min, y_min, x_max, y_max], 1) + + +# Copied from transformers.models.detr.feature_extraction_detr.rgb_to_id +def rgb_to_id(color): + if isinstance(color, np.ndarray) and len(color.shape) == 3: + if color.dtype == np.uint8: + color = color.astype(np.int32) + return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2] + return int(color[0] + 256 * color[1] + 256 * 256 * color[2]) + + +# Copied from transformers.models.detr.feature_extraction_detr.id_to_rgb +def id_to_rgb(id_map): + if isinstance(id_map, np.ndarray): + id_map_copy = id_map.copy() + rgb_shape = tuple(list(id_map.shape) + [3]) + rgb_map = np.zeros(rgb_shape, dtype=np.uint8) + for i in range(3): + rgb_map[..., i] = id_map_copy % 256 + id_map_copy //= 256 + return rgb_map + color = [] + for _ in range(3): + color.append(id_map % 256) + id_map //= 256 + return color + + +class DeformableDetrFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin): + r""" + Constructs a Deformable DETR feature extractor. Differs only in the postprocessing of object detection compared to + DETR. + + This feature extractor inherits from [`FeatureExtractionMixin`] which contains most of the main methods. Users + should refer to this superclass for more information regarding those methods. + + Args: + format (`str`, *optional*, defaults to `"coco_detection"`): + Data format of the annotations. One of "coco_detection" or "coco_panoptic". + do_resize (`bool`, *optional*, defaults to `True`): + Whether to resize the input to a certain `size`. + size (`int`, *optional*, defaults to 800): + Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a + sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of + the image will be matched to this number. i.e, if `height > width`, then image will be rescaled to `(size * + height / width, size)`. + max_size (`int`, *optional*, defaults to 1333): + The largest size an image dimension can have (otherwise it's capped). Only has an effect if `do_resize` is + set to `True`. + do_normalize (`bool`, *optional*, defaults to `True`): + Whether or not to normalize the input with mean and standard deviation. + image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`): + The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean. + image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`): + The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the + ImageNet std. + """ + + model_input_names = ["pixel_values", "pixel_mask"] + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.__init__ + def __init__( + self, + format="coco_detection", + do_resize=True, + size=800, + max_size=1333, + do_normalize=True, + image_mean=None, + image_std=None, + **kwargs + ): + super().__init__(**kwargs) + self.format = self._is_valid_format(format) + self.do_resize = do_resize + self.size = size + self.max_size = max_size + self.do_normalize = do_normalize + self.image_mean = image_mean if image_mean is not None else [0.485, 0.456, 0.406] # ImageNet mean + self.image_std = image_std if image_std is not None else [0.229, 0.224, 0.225] # ImageNet std + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._is_valid_format + def _is_valid_format(self, format): + if format not in ["coco_detection", "coco_panoptic"]: + raise ValueError(f"Format {format} not supported") + return format + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare + def prepare(self, image, target, return_segmentation_masks=False, masks_path=None): + if self.format == "coco_detection": + image, target = self.prepare_coco_detection(image, target, return_segmentation_masks) + return image, target + elif self.format == "coco_panoptic": + image, target = self.prepare_coco_panoptic(image, target, masks_path) + return image, target + else: + raise ValueError(f"Format {self.format} not supported") + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.convert_coco_poly_to_mask + def convert_coco_poly_to_mask(self, segmentations, height, width): + + try: + from pycocotools import mask as coco_mask + except ImportError: + raise ImportError("Pycocotools is not installed in your environment.") + + masks = [] + for polygons in segmentations: + rles = coco_mask.frPyObjects(polygons, height, width) + mask = coco_mask.decode(rles) + if len(mask.shape) < 3: + mask = mask[..., None] + mask = np.asarray(mask, dtype=np.uint8) + mask = np.any(mask, axis=2) + masks.append(mask) + if masks: + masks = np.stack(masks, axis=0) + else: + masks = np.zeros((0, height, width), dtype=np.uint8) + + return masks + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare_coco_detection + def prepare_coco_detection(self, image, target, return_segmentation_masks=False): + """ + Convert the target in COCO format into the format expected by DETR. + """ + w, h = image.size + + image_id = target["image_id"] + image_id = np.asarray([image_id], dtype=np.int64) + + # get all COCO annotations for the given image + anno = target["annotations"] + + anno = [obj for obj in anno if "iscrowd" not in obj or obj["iscrowd"] == 0] + + boxes = [obj["bbox"] for obj in anno] + # guard against no boxes via resizing + boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4) + boxes[:, 2:] += boxes[:, :2] + boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=w) + boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=h) + + classes = [obj["category_id"] for obj in anno] + classes = np.asarray(classes, dtype=np.int64) + + if return_segmentation_masks: + segmentations = [obj["segmentation"] for obj in anno] + masks = self.convert_coco_poly_to_mask(segmentations, h, w) + + keypoints = None + if anno and "keypoints" in anno[0]: + keypoints = [obj["keypoints"] for obj in anno] + keypoints = np.asarray(keypoints, dtype=np.float32) + num_keypoints = keypoints.shape[0] + if num_keypoints: + keypoints = keypoints.reshape((-1, 3)) + + keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) + boxes = boxes[keep] + classes = classes[keep] + if return_segmentation_masks: + masks = masks[keep] + if keypoints is not None: + keypoints = keypoints[keep] + + target = {} + target["boxes"] = boxes + target["class_labels"] = classes + if return_segmentation_masks: + target["masks"] = masks + target["image_id"] = image_id + if keypoints is not None: + target["keypoints"] = keypoints + + # for conversion to coco api + area = np.asarray([obj["area"] for obj in anno], dtype=np.float32) + iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno], dtype=np.int64) + target["area"] = area[keep] + target["iscrowd"] = iscrowd[keep] + + target["orig_size"] = np.asarray([int(h), int(w)], dtype=np.int64) + target["size"] = np.asarray([int(h), int(w)], dtype=np.int64) + + return image, target + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare_coco_panoptic + def prepare_coco_panoptic(self, image, target, masks_path, return_masks=True): + w, h = image.size + ann_info = target.copy() + ann_path = pathlib.Path(masks_path) / ann_info["file_name"] + + if "segments_info" in ann_info: + masks = np.asarray(Image.open(ann_path), dtype=np.uint32) + masks = rgb_to_id(masks) + + ids = np.array([ann["id"] for ann in ann_info["segments_info"]]) + masks = masks == ids[:, None, None] + masks = np.asarray(masks, dtype=np.uint8) + + labels = np.asarray([ann["category_id"] for ann in ann_info["segments_info"]], dtype=np.int64) + + target = {} + target["image_id"] = np.asarray( + [ann_info["image_id"] if "image_id" in ann_info else ann_info["id"]], dtype=np.int64 + ) + if return_masks: + target["masks"] = masks + target["class_labels"] = labels + + target["boxes"] = masks_to_boxes(masks) + + target["size"] = np.asarray([int(h), int(w)], dtype=np.int64) + target["orig_size"] = np.asarray([int(h), int(w)], dtype=np.int64) + if "segments_info" in ann_info: + target["iscrowd"] = np.asarray([ann["iscrowd"] for ann in ann_info["segments_info"]], dtype=np.int64) + target["area"] = np.asarray([ann["area"] for ann in ann_info["segments_info"]], dtype=np.float32) + + return image, target + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._resize + def _resize(self, image, size, target=None, max_size=None): + """ + Resize the image to the given size. Size can be min_size (scalar) or (w, h) tuple. If size is an int, smaller + edge of the image will be matched to this number. + + If given, also resize the target accordingly. + """ + if not isinstance(image, Image.Image): + image = self.to_pil_image(image) + + def get_size_with_aspect_ratio(image_size, size, max_size=None): + w, h = image_size + if max_size is not None: + min_original_size = float(min((w, h))) + max_original_size = float(max((w, h))) + if max_original_size / min_original_size * size > max_size: + size = int(round(max_size * min_original_size / max_original_size)) + + if (w <= h and w == size) or (h <= w and h == size): + return (h, w) + + if w < h: + ow = size + oh = int(size * h / w) + else: + oh = size + ow = int(size * w / h) + + return (oh, ow) + + def get_size(image_size, size, max_size=None): + if isinstance(size, (list, tuple)): + return size + else: + # size returned must be (w, h) since we use PIL to resize images + # so we revert the tuple + return get_size_with_aspect_ratio(image_size, size, max_size)[::-1] + + size = get_size(image.size, size, max_size) + rescaled_image = self.resize(image, size=size) + + if target is None: + return rescaled_image, None + + ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)) + ratio_width, ratio_height = ratios + + target = target.copy() + if "boxes" in target: + boxes = target["boxes"] + scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32) + target["boxes"] = scaled_boxes + + if "area" in target: + area = target["area"] + scaled_area = area * (ratio_width * ratio_height) + target["area"] = scaled_area + + w, h = size + target["size"] = np.asarray([h, w], dtype=np.int64) + + if "masks" in target: + # use PyTorch as current workaround + # TODO replace by self.resize + masks = torch.from_numpy(target["masks"][:, None]).float() + interpolated_masks = nn.functional.interpolate(masks, size=(h, w), mode="nearest")[:, 0] > 0.5 + target["masks"] = interpolated_masks.numpy() + + return rescaled_image, target + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._normalize + def _normalize(self, image, mean, std, target=None): + """ + Normalize the image with a certain mean and std. + + If given, also normalize the target bounding boxes based on the size of the image. + """ + + image = self.normalize(image, mean=mean, std=std) + if target is None: + return image, None + + target = target.copy() + h, w = image.shape[-2:] + + if "boxes" in target: + boxes = target["boxes"] + boxes = corners_to_center_format(boxes) + boxes = boxes / np.asarray([w, h, w, h], dtype=np.float32) + target["boxes"] = boxes + + return image, target + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.__call__ + def __call__( + self, + images: ImageInput, + annotations: Union[List[Dict], List[List[Dict]]] = None, + return_segmentation_masks: Optional[bool] = False, + masks_path: Optional[pathlib.Path] = None, + pad_and_return_pixel_mask: Optional[bool] = True, + return_tensors: Optional[Union[str, TensorType]] = None, + **kwargs, + ) -> BatchFeature: + """ + Main method to prepare for the model one or several image(s) and optional annotations. Images are by default + padded up to the largest image in a batch, and a pixel mask is created that indicates which pixels are + real/which are padding. + + + + NumPy arrays and PyTorch tensors are converted to PIL images when resizing, so the most efficient is to pass + PIL images. + + + + Args: + images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`): + The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch + tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a + number of channels, H and W are image height and width. + + annotations (`Dict`, `List[Dict]`, *optional*): + The corresponding annotations in COCO format. + + In case [`DetrFeatureExtractor`] was initialized with `format = "coco_detection"`, the annotations for + each image should have the following format: {'image_id': int, 'annotations': [annotation]}, with the + annotations being a list of COCO object annotations. + + In case [`DetrFeatureExtractor`] was initialized with `format = "coco_panoptic"`, the annotations for + each image should have the following format: {'image_id': int, 'file_name': str, 'segments_info': + [segment_info]} with segments_info being a list of COCO panoptic annotations. + + return_segmentation_masks (`Dict`, `List[Dict]`, *optional*, defaults to `False`): + Whether to also include instance segmentation masks as part of the labels in case `format = + "coco_detection"`. + + masks_path (`pathlib.Path`, *optional*): + Path to the directory containing the PNG files that store the class-agnostic image segmentations. Only + relevant in case [`DetrFeatureExtractor`] was initialized with `format = "coco_panoptic"`. + + pad_and_return_pixel_mask (`bool`, *optional*, defaults to `True`): + Whether or not to pad images up to the largest image in a batch and create a pixel mask. + + If left to the default, will return a pixel mask that is: + + - 1 for pixels that are real (i.e. **not masked**), + - 0 for pixels that are padding (i.e. **masked**). + + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` + objects. + + Returns: + [`BatchFeature`]: A [`BatchFeature`] with the following fields: + + - **pixel_values** -- Pixel values to be fed to a model. + - **pixel_mask** -- Pixel mask to be fed to a model (when `pad_and_return_pixel_mask=True` or if + *"pixel_mask"* is in `self.model_input_names`). + - **labels** -- Optional labels to be fed to a model (when `annotations` are provided) + """ + # Input type checking for clearer error + + valid_images = False + valid_annotations = False + valid_masks_path = False + + # Check that images has a valid type + if isinstance(images, (Image.Image, np.ndarray)) or is_torch_tensor(images): + valid_images = True + elif isinstance(images, (list, tuple)): + if len(images) == 0 or isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0]): + valid_images = True + + if not valid_images: + raise ValueError( + "Images must of type `PIL.Image.Image`, `np.ndarray` or `torch.Tensor` (single example), " + "`List[PIL.Image.Image]`, `List[np.ndarray]` or `List[torch.Tensor]` (batch of examples)." + ) + + is_batched = bool( + isinstance(images, (list, tuple)) + and (isinstance(images[0], (Image.Image, np.ndarray)) or is_torch_tensor(images[0])) + ) + + # Check that annotations has a valid type + if annotations is not None: + if not is_batched: + if self.format == "coco_detection": + if isinstance(annotations, dict) and "image_id" in annotations and "annotations" in annotations: + if isinstance(annotations["annotations"], (list, tuple)): + # an image can have no annotations + if len(annotations["annotations"]) == 0 or isinstance(annotations["annotations"][0], dict): + valid_annotations = True + elif self.format == "coco_panoptic": + if isinstance(annotations, dict) and "image_id" in annotations and "segments_info" in annotations: + if isinstance(annotations["segments_info"], (list, tuple)): + # an image can have no segments (?) + if len(annotations["segments_info"]) == 0 or isinstance( + annotations["segments_info"][0], dict + ): + valid_annotations = True + else: + if isinstance(annotations, (list, tuple)): + if len(images) != len(annotations): + raise ValueError("There must be as many annotations as there are images") + if isinstance(annotations[0], Dict): + if self.format == "coco_detection": + if isinstance(annotations[0]["annotations"], (list, tuple)): + valid_annotations = True + elif self.format == "coco_panoptic": + if isinstance(annotations[0]["segments_info"], (list, tuple)): + valid_annotations = True + + if not valid_annotations: + raise ValueError( + """ + Annotations must of type `Dict` (single image) or `List[Dict]` (batch of images). In case of object + detection, each dictionary should contain the keys 'image_id' and 'annotations', with the latter + being a list of annotations in COCO format. In case of panoptic segmentation, each dictionary + should contain the keys 'file_name', 'image_id' and 'segments_info', with the latter being a list + of annotations in COCO format. + """ + ) + + # Check that masks_path has a valid type + if masks_path is not None: + if self.format == "coco_panoptic": + if isinstance(masks_path, pathlib.Path): + valid_masks_path = True + if not valid_masks_path: + raise ValueError( + "The path to the directory containing the mask PNG files should be provided as a" + " `pathlib.Path` object." + ) + + if not is_batched: + images = [images] + if annotations is not None: + annotations = [annotations] + + # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image) + if annotations is not None: + for idx, (image, target) in enumerate(zip(images, annotations)): + if not isinstance(image, Image.Image): + image = self.to_pil_image(image) + image, target = self.prepare(image, target, return_segmentation_masks, masks_path) + images[idx] = image + annotations[idx] = target + + # transformations (resizing + normalization) + if self.do_resize and self.size is not None: + if annotations is not None: + for idx, (image, target) in enumerate(zip(images, annotations)): + image, target = self._resize(image=image, target=target, size=self.size, max_size=self.max_size) + images[idx] = image + annotations[idx] = target + else: + for idx, image in enumerate(images): + images[idx] = self._resize(image=image, target=None, size=self.size, max_size=self.max_size)[0] + + if self.do_normalize: + if annotations is not None: + for idx, (image, target) in enumerate(zip(images, annotations)): + image, target = self._normalize( + image=image, mean=self.image_mean, std=self.image_std, target=target + ) + images[idx] = image + annotations[idx] = target + else: + images = [ + self._normalize(image=image, mean=self.image_mean, std=self.image_std)[0] for image in images + ] + + if pad_and_return_pixel_mask: + # pad images up to largest image in batch and create pixel_mask + max_size = self._max_by_axis([list(image.shape) for image in images]) + c, h, w = max_size + padded_images = [] + pixel_mask = [] + for image in images: + # create padded image + padded_image = np.zeros((c, h, w), dtype=np.float32) + padded_image[: image.shape[0], : image.shape[1], : image.shape[2]] = np.copy(image) + padded_images.append(padded_image) + # create pixel mask + mask = np.zeros((h, w), dtype=np.int64) + mask[: image.shape[1], : image.shape[2]] = True + pixel_mask.append(mask) + images = padded_images + + # return as BatchFeature + data = {} + data["pixel_values"] = images + if pad_and_return_pixel_mask: + data["pixel_mask"] = pixel_mask + encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) + + if annotations is not None: + # Convert to TensorType + tensor_type = return_tensors + if not isinstance(tensor_type, TensorType): + tensor_type = TensorType(tensor_type) + + if not tensor_type == TensorType.PYTORCH: + raise ValueError("Only PyTorch is supported for the moment.") + else: + if not is_torch_available(): + raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.") + + encoded_inputs["labels"] = [ + {k: torch.from_numpy(v) for k, v in target.items()} for target in annotations + ] + + return encoded_inputs + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor._max_by_axis + def _max_by_axis(self, the_list): + # type: (List[List[int]]) -> List[int] + maxes = the_list[0] + for sublist in the_list[1:]: + for index, item in enumerate(sublist): + maxes[index] = max(maxes[index], item) + return maxes + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.pad_and_create_pixel_mask + def pad_and_create_pixel_mask( + self, pixel_values_list: List["torch.Tensor"], return_tensors: Optional[Union[str, TensorType]] = None + ): + """ + Pad images up to the largest image in a batch and create a corresponding `pixel_mask`. + + Args: + pixel_values_list (`List[torch.Tensor]`): + List of images (pixel values) to be padded. Each image should be a tensor of shape (C, H, W). + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` + objects. + + Returns: + [`BatchFeature`]: A [`BatchFeature`] with the following fields: + + - **pixel_values** -- Pixel values to be fed to a model. + - **pixel_mask** -- Pixel mask to be fed to a model (when `pad_and_return_pixel_mask=True` or if + *"pixel_mask"* is in `self.model_input_names`). + + """ + + max_size = self._max_by_axis([list(image.shape) for image in pixel_values_list]) + c, h, w = max_size + padded_images = [] + pixel_mask = [] + for image in pixel_values_list: + # create padded image + padded_image = np.zeros((c, h, w), dtype=np.float32) + padded_image[: image.shape[0], : image.shape[1], : image.shape[2]] = np.copy(image) + padded_images.append(padded_image) + # create pixel mask + mask = np.zeros((h, w), dtype=np.int64) + mask[: image.shape[1], : image.shape[2]] = True + pixel_mask.append(mask) + + # return as BatchFeature + data = {"pixel_values": padded_images, "pixel_mask": pixel_mask} + encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) + + return encoded_inputs + + def post_process(self, outputs, target_sizes): + """ + Converts the output of [`DeformableDetrForObjectDetection`] into the format expected by the COCO api. Only + supports PyTorch. + + Args: + outputs ([`DeformableDetrObjectDetectionOutput`]): + Raw outputs of the model. + target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): + Tensor containing the size (height, width) of each image of the batch. For evaluation, this must be the + original image size (before any data augmentation). For visualization, this should be the image size + after data augment, but before padding. + Returns: + `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image + in the batch as predicted by the model. + """ + out_logits, out_bbox = outputs.logits, outputs.pred_boxes + + if len(out_logits) != len(target_sizes): + raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits") + if target_sizes.shape[1] != 2: + raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch") + + prob = out_logits.sigmoid() + topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1) + scores = topk_values + topk_boxes = topk_indexes // out_logits.shape[2] + labels = topk_indexes % out_logits.shape[2] + boxes = center_to_corners_format(out_bbox) + boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) + + # and from relative [0, 1] to absolute [0, height] coordinates + img_h, img_w = target_sizes.unbind(1) + scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) + boxes = boxes * scale_fct[:, None, :] + + results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)] + + return results + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.post_process_segmentation with Detr->DeformableDetr + def post_process_segmentation(self, outputs, target_sizes, threshold=0.9, mask_threshold=0.5): + """ + Converts the output of [`DeformableDetrForSegmentation`] into image segmentation predictions. Only supports + PyTorch. + + Parameters: + outputs ([`DeformableDetrSegmentationOutput`]): + Raw outputs of the model. + target_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`): + Torch Tensor (or list) corresponding to the requested final size (h, w) of each prediction. + threshold (`float`, *optional*, defaults to 0.9): + Threshold to use to filter out queries. + mask_threshold (`float`, *optional*, defaults to 0.5): + Threshold to use when turning the predicted masks into binary values. + + Returns: + `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, and masks for an image + in the batch as predicted by the model. + """ + out_logits, raw_masks = outputs.logits, outputs.pred_masks + preds = [] + + def to_tuple(tup): + if isinstance(tup, tuple): + return tup + return tuple(tup.cpu().tolist()) + + for cur_logits, cur_masks, size in zip(out_logits, raw_masks, target_sizes): + # we filter empty queries and detection below threshold + scores, labels = cur_logits.softmax(-1).max(-1) + keep = labels.ne(outputs.logits.shape[-1] - 1) & (scores > threshold) + cur_scores, cur_classes = cur_logits.softmax(-1).max(-1) + cur_scores = cur_scores[keep] + cur_classes = cur_classes[keep] + cur_masks = cur_masks[keep] + cur_masks = nn.functional.interpolate(cur_masks[:, None], to_tuple(size), mode="bilinear").squeeze(1) + cur_masks = (cur_masks.sigmoid() > mask_threshold) * 1 + + predictions = {"scores": cur_scores, "labels": cur_classes, "masks": cur_masks} + preds.append(predictions) + return preds + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.post_process_instance with Detr->DeformableDetr + def post_process_instance(self, results, outputs, orig_target_sizes, max_target_sizes, threshold=0.5): + """ + Converts the output of [`DeformableDetrForSegmentation`] into actual instance segmentation predictions. Only + supports PyTorch. + + Args: + results (`List[Dict]`): + Results list obtained by [`~DeformableDetrFeatureExtractor.post_process`], to which "masks" results + will be added. + outputs ([`DeformableDetrSegmentationOutput`]): + Raw outputs of the model. + orig_target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): + Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original + image size (before any data augmentation). + max_target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): + Tensor containing the maximum size (h, w) of each image of the batch. For evaluation, this must be the + original image size (before any data augmentation). + threshold (`float`, *optional*, defaults to 0.5): + Threshold to use when turning the predicted masks into binary values. + + Returns: + `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, boxes and masks for an + image in the batch as predicted by the model. + """ + + if len(orig_target_sizes) != len(max_target_sizes): + raise ValueError("Make sure to pass in as many orig_target_sizes as max_target_sizes") + max_h, max_w = max_target_sizes.max(0)[0].tolist() + outputs_masks = outputs.pred_masks.squeeze(2) + outputs_masks = nn.functional.interpolate( + outputs_masks, size=(max_h, max_w), mode="bilinear", align_corners=False + ) + outputs_masks = (outputs_masks.sigmoid() > threshold).cpu() + + for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)): + img_h, img_w = t[0], t[1] + results[i]["masks"] = cur_mask[:, :img_h, :img_w].unsqueeze(1) + results[i]["masks"] = nn.functional.interpolate( + results[i]["masks"].float(), size=tuple(tt.tolist()), mode="nearest" + ).byte() + + return results + + # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.post_process_panoptic with Detr->DeformableDetr + def post_process_panoptic(self, outputs, processed_sizes, target_sizes=None, is_thing_map=None, threshold=0.85): + """ + Converts the output of [`DeformableDetrForSegmentation`] into actual panoptic predictions. Only supports + PyTorch. + + Parameters: + outputs ([`DeformableDetrSegmentationOutput`]): + Raw outputs of the model. + processed_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`): + Torch Tensor (or list) containing the size (h, w) of each image of the batch, i.e. the size after data + augmentation but before batching. + target_sizes (`torch.Tensor` of shape `(batch_size, 2)` or `List[Tuple]` of length `batch_size`, *optional*): + Torch Tensor (or list) corresponding to the requested final size (h, w) of each prediction. If left to + None, it will default to the `processed_sizes`. + is_thing_map (`torch.Tensor` of shape `(batch_size, 2)`, *optional*): + Dictionary mapping class indices to either True or False, depending on whether or not they are a thing. + If not set, defaults to the `is_thing_map` of COCO panoptic. + threshold (`float`, *optional*, defaults to 0.85): + Threshold to use to filter out queries. + + Returns: + `List[Dict]`: A list of dictionaries, each dictionary containing a PNG string and segments_info values for + an image in the batch as predicted by the model. + """ + if target_sizes is None: + target_sizes = processed_sizes + if len(processed_sizes) != len(target_sizes): + raise ValueError("Make sure to pass in as many processed_sizes as target_sizes") + + if is_thing_map is None: + # default to is_thing_map of COCO panoptic + is_thing_map = {i: i <= 90 for i in range(201)} + + out_logits, raw_masks, raw_boxes = outputs.logits, outputs.pred_masks, outputs.pred_boxes + if not len(out_logits) == len(raw_masks) == len(target_sizes): + raise ValueError( + "Make sure that you pass in as many target sizes as the batch dimension of the logits and masks" + ) + preds = [] + + def to_tuple(tup): + if isinstance(tup, tuple): + return tup + return tuple(tup.cpu().tolist()) + + for cur_logits, cur_masks, cur_boxes, size, target_size in zip( + out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes + ): + # we filter empty queries and detection below threshold + scores, labels = cur_logits.softmax(-1).max(-1) + keep = labels.ne(outputs.logits.shape[-1] - 1) & (scores > threshold) + cur_scores, cur_classes = cur_logits.softmax(-1).max(-1) + cur_scores = cur_scores[keep] + cur_classes = cur_classes[keep] + cur_masks = cur_masks[keep] + cur_masks = nn.functional.interpolate(cur_masks[:, None], to_tuple(size), mode="bilinear").squeeze(1) + cur_boxes = center_to_corners_format(cur_boxes[keep]) + + h, w = cur_masks.shape[-2:] + if len(cur_boxes) != len(cur_classes): + raise ValueError("Not as many boxes as there are classes") + + # It may be that we have several predicted masks for the same stuff class. + # In the following, we track the list of masks ids for each stuff class (they are merged later on) + cur_masks = cur_masks.flatten(1) + stuff_equiv_classes = defaultdict(lambda: []) + for k, label in enumerate(cur_classes): + if not is_thing_map[label.item()]: + stuff_equiv_classes[label.item()].append(k) + + def get_ids_area(masks, scores, dedup=False): + # This helper function creates the final panoptic segmentation image + # It also returns the area of the masks that appears on the image + + m_id = masks.transpose(0, 1).softmax(-1) + + if m_id.shape[-1] == 0: + # We didn't detect any mask :( + m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device) + else: + m_id = m_id.argmax(-1).view(h, w) + + if dedup: + # Merge the masks corresponding to the same stuff class + for equiv in stuff_equiv_classes.values(): + if len(equiv) > 1: + for eq_id in equiv: + m_id.masked_fill_(m_id.eq(eq_id), equiv[0]) + + final_h, final_w = to_tuple(target_size) + + seg_img = Image.fromarray(id_to_rgb(m_id.view(h, w).cpu().numpy())) + seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST) + + np_seg_img = torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())) + np_seg_img = np_seg_img.view(final_h, final_w, 3) + np_seg_img = np_seg_img.numpy() + + m_id = torch.from_numpy(rgb_to_id(np_seg_img)) + + area = [] + for i in range(len(scores)): + area.append(m_id.eq(i).sum().item()) + return area, seg_img + + area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True) + if cur_classes.numel() > 0: + # We know filter empty masks as long as we find some + while True: + filtered_small = torch.as_tensor( + [area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device + ) + if filtered_small.any().item(): + cur_scores = cur_scores[~filtered_small] + cur_classes = cur_classes[~filtered_small] + cur_masks = cur_masks[~filtered_small] + area, seg_img = get_ids_area(cur_masks, cur_scores) + else: + break + + else: + cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device) + + segments_info = [] + for i, a in enumerate(area): + cat = cur_classes[i].item() + segments_info.append({"id": i, "isthing": is_thing_map[cat], "category_id": cat, "area": a}) + del cur_classes + + with io.BytesIO() as out: + seg_img.save(out, format="PNG") + predictions = {"png_string": out.getvalue(), "segments_info": segments_info} + preds.append(predictions) + return preds diff --git a/src/transformers/models/deformable_detr/modeling_deformable_detr.py b/src/transformers/models/deformable_detr/modeling_deformable_detr.py index 5038cd391c2b3c..0b176a318c4c03 100755 --- a/src/transformers/models/deformable_detr/modeling_deformable_detr.py +++ b/src/transformers/models/deformable_detr/modeling_deformable_detr.py @@ -1884,15 +1884,15 @@ def forward( >>> results = feature_extractor.post_process(outputs, target_sizes=target_sizes)[0] >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): ... box = [round(i, 2) for i in box.tolist()] - ... # let's only keep detections with score > 0.7 - ... if score > 0.7: + ... # let's only keep detections with score > 0.5 + ... if score > 0.5: ... print( ... f"Detected {model.config.id2label[label.item()]} with confidence " ... f"{round(score.item(), 3)} at location {box}" ... ) - Detected cat with confidence 0.856 at location [342.19, 24.3, 640.02, 372.25] - Detected remote with confidence 0.739 at location [40.79, 72.78, 176.76, 117.25] - Detected cat with confidence 0.859 at location [16.5, 52.84, 318.25, 470.78] + Detected cat with confidence 0.8 at location [16.5, 52.84, 318.25, 470.78] + Detected cat with confidence 0.789 at location [342.19, 24.3, 640.02, 372.25] + Detected remote with confidence 0.633 at location [40.79, 72.78, 176.76, 117.25] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict diff --git a/src/transformers/models/detr/feature_extraction_detr.py b/src/transformers/models/detr/feature_extraction_detr.py index 91e406c71fc944..4377bd6f8d60e9 100644 --- a/src/transformers/models/detr/feature_extraction_detr.py +++ b/src/transformers/models/detr/feature_extraction_detr.py @@ -137,7 +137,7 @@ class DetrFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin): sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of the image will be matched to this number. i.e, if `height > width`, then image will be rescaled to `(size * height / width, size)`. - max_size (`int`, *optional*, defaults to `1333`): + max_size (`int`, *optional*, defaults to 1333): The largest size an image dimension can have (otherwise it's capped). Only has an effect if `do_resize` is set to `True`. do_normalize (`bool`, *optional*, defaults to `True`): @@ -683,9 +683,9 @@ def post_process(self, outputs, target_sizes): outputs ([`DetrObjectDetectionOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): - Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original - image size (before any data augmentation). For visualization, this should be the image size after data - augment, but before padding. + Tensor containing the size (height, width) of each image of the batch. For evaluation, this must be the + original image size (before any data augmentation). For visualization, this should be the image size + after data augment, but before padding. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image diff --git a/src/transformers/models/yolos/feature_extraction_yolos.py b/src/transformers/models/yolos/feature_extraction_yolos.py index e199d1ae7bf463..616d8e4849dc4d 100644 --- a/src/transformers/models/yolos/feature_extraction_yolos.py +++ b/src/transformers/models/yolos/feature_extraction_yolos.py @@ -666,9 +666,9 @@ def post_process(self, outputs, target_sizes): outputs ([`DetrObjectDetectionOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): - Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original - image size (before any data augmentation). For visualization, this should be the image size after data - augment, but before padding. + Tensor containing the size (height, width) of each image of the batch. For evaluation, this must be the + original image size (before any data augmentation). For visualization, this should be the image size + after data augment, but before padding. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image diff --git a/src/transformers/utils/dummy_vision_objects.py b/src/transformers/utils/dummy_vision_objects.py index d2ec5be33ceb8c..2d1f0a88cd0ebf 100644 --- a/src/transformers/utils/dummy_vision_objects.py +++ b/src/transformers/utils/dummy_vision_objects.py @@ -38,6 +38,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) +class DeformableDetrFeatureExtractor(metaclass=DummyObject): + _backends = ["vision"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["vision"]) + + class DeiTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] diff --git a/tests/models/deformable_detr/test_feature_extraction_deformable_detr.py b/tests/models/deformable_detr/test_feature_extraction_deformable_detr.py new file mode 100644 index 00000000000000..6a7cfefee52c0d --- /dev/null +++ b/tests/models/deformable_detr/test_feature_extraction_deformable_detr.py @@ -0,0 +1,341 @@ +# coding=utf-8 +# Copyright 2022 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import json +import pathlib +import unittest + +import numpy as np + +from transformers.testing_utils import require_torch, require_vision, slow +from transformers.utils import is_torch_available, is_vision_available + +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin, prepare_image_inputs + + +if is_torch_available(): + import torch + +if is_vision_available(): + from PIL import Image + + from transformers import DeformableDetrFeatureExtractor + + +class DeformableDetrFeatureExtractionTester(unittest.TestCase): + def __init__( + self, + parent, + batch_size=7, + num_channels=3, + min_resolution=30, + max_resolution=400, + do_resize=True, + size=18, + max_size=1333, # by setting max_size > max_resolution we're effectively not testing this :p + do_normalize=True, + image_mean=[0.5, 0.5, 0.5], + image_std=[0.5, 0.5, 0.5], + ): + self.parent = parent + self.batch_size = batch_size + self.num_channels = num_channels + self.min_resolution = min_resolution + self.max_resolution = max_resolution + self.do_resize = do_resize + self.size = size + self.max_size = max_size + self.do_normalize = do_normalize + self.image_mean = image_mean + self.image_std = image_std + + def prepare_feat_extract_dict(self): + return { + "do_resize": self.do_resize, + "size": self.size, + "max_size": self.max_size, + "do_normalize": self.do_normalize, + "image_mean": self.image_mean, + "image_std": self.image_std, + } + + def get_expected_values(self, image_inputs, batched=False): + """ + This function computes the expected height and width when providing images to DeformableDetrFeatureExtractor, + assuming do_resize is set to True with a scalar size. + """ + if not batched: + image = image_inputs[0] + if isinstance(image, Image.Image): + w, h = image.size + else: + h, w = image.shape[1], image.shape[2] + if w < h: + expected_height = int(self.size * h / w) + expected_width = self.size + elif w > h: + expected_height = self.size + expected_width = int(self.size * w / h) + else: + expected_height = self.size + expected_width = self.size + + else: + expected_values = [] + for image in image_inputs: + expected_height, expected_width = self.get_expected_values([image]) + expected_values.append((expected_height, expected_width)) + expected_height = max(expected_values, key=lambda item: item[0])[0] + expected_width = max(expected_values, key=lambda item: item[1])[1] + + return expected_height, expected_width + + +@require_torch +@require_vision +class DeformableDetrFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): + + feature_extraction_class = DeformableDetrFeatureExtractor if is_vision_available() else None + + def setUp(self): + self.feature_extract_tester = DeformableDetrFeatureExtractionTester(self) + + @property + def feat_extract_dict(self): + return self.feature_extract_tester.prepare_feat_extract_dict() + + def test_feat_extract_properties(self): + feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + self.assertTrue(hasattr(feature_extractor, "image_mean")) + self.assertTrue(hasattr(feature_extractor, "image_std")) + self.assertTrue(hasattr(feature_extractor, "do_normalize")) + self.assertTrue(hasattr(feature_extractor, "do_resize")) + self.assertTrue(hasattr(feature_extractor, "size")) + self.assertTrue(hasattr(feature_extractor, "max_size")) + + def test_batch_feature(self): + pass + + def test_call_pil(self): + # Initialize feature_extractor + feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # create random PIL images + image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False) + for image in image_inputs: + self.assertIsInstance(image, Image.Image) + + # Test not batched input + encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + + expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs) + + self.assertEqual( + encoded_images.shape, + (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + ) + + # Test batched + expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True) + + encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + self.assertEqual( + encoded_images.shape, + ( + self.feature_extract_tester.batch_size, + self.feature_extract_tester.num_channels, + expected_height, + expected_width, + ), + ) + + def test_call_numpy(self): + # Initialize feature_extractor + feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # create random numpy tensors + image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, numpify=True) + for image in image_inputs: + self.assertIsInstance(image, np.ndarray) + + # Test not batched input + encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + + expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs) + + self.assertEqual( + encoded_images.shape, + (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + ) + + # Test batched + encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + + expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True) + + self.assertEqual( + encoded_images.shape, + ( + self.feature_extract_tester.batch_size, + self.feature_extract_tester.num_channels, + expected_height, + expected_width, + ), + ) + + def test_call_pytorch(self): + # Initialize feature_extractor + feature_extractor = self.feature_extraction_class(**self.feat_extract_dict) + # create random PyTorch tensors + image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + for image in image_inputs: + self.assertIsInstance(image, torch.Tensor) + + # Test not batched input + encoded_images = feature_extractor(image_inputs[0], return_tensors="pt").pixel_values + + expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs) + + self.assertEqual( + encoded_images.shape, + (1, self.feature_extract_tester.num_channels, expected_height, expected_width), + ) + + # Test batched + encoded_images = feature_extractor(image_inputs, return_tensors="pt").pixel_values + + expected_height, expected_width = self.feature_extract_tester.get_expected_values(image_inputs, batched=True) + + self.assertEqual( + encoded_images.shape, + ( + self.feature_extract_tester.batch_size, + self.feature_extract_tester.num_channels, + expected_height, + expected_width, + ), + ) + + def test_equivalence_pad_and_create_pixel_mask(self): + # Initialize feature_extractors + feature_extractor_1 = self.feature_extraction_class(**self.feat_extract_dict) + feature_extractor_2 = self.feature_extraction_class(do_resize=False, do_normalize=False) + # create random PyTorch tensors + image_inputs = prepare_image_inputs(self.feature_extract_tester, equal_resolution=False, torchify=True) + for image in image_inputs: + self.assertIsInstance(image, torch.Tensor) + + # Test whether the method "pad_and_return_pixel_mask" and calling the feature extractor return the same tensors + encoded_images_with_method = feature_extractor_1.pad_and_create_pixel_mask(image_inputs, return_tensors="pt") + encoded_images = feature_extractor_2(image_inputs, return_tensors="pt") + + self.assertTrue( + torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4) + ) + self.assertTrue( + torch.allclose(encoded_images_with_method["pixel_mask"], encoded_images["pixel_mask"], atol=1e-4) + ) + + @slow + def test_call_pytorch_with_coco_detection_annotations(self): + # prepare image and target + image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") + with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f: + target = json.loads(f.read()) + + target = {"image_id": 39769, "annotations": target} + + # encode them + feature_extractor = DeformableDetrFeatureExtractor() + encoding = feature_extractor(images=image, annotations=target, return_tensors="pt") + + # verify pixel values + expected_shape = torch.Size([1, 3, 800, 1066]) + self.assertEqual(encoding["pixel_values"].shape, expected_shape) + + expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) + self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)) + + # verify area + expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) + self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area)) + # verify boxes + expected_boxes_shape = torch.Size([6, 4]) + self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) + expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) + self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)) + # verify image_id + expected_image_id = torch.tensor([39769]) + self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)) + # verify is_crowd + expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) + self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)) + # verify class_labels + expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17]) + self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)) + # verify orig_size + expected_orig_size = torch.tensor([480, 640]) + self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)) + # verify size + expected_size = torch.tensor([800, 1066]) + self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size)) + + @slow + def test_call_pytorch_with_coco_panoptic_annotations(self): + # prepare image, target and masks_path + image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") + with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f: + target = json.loads(f.read()) + + target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target} + + masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic") + + # encode them + feature_extractor = DeformableDetrFeatureExtractor(format="coco_panoptic") + encoding = feature_extractor(images=image, annotations=target, masks_path=masks_path, return_tensors="pt") + + # verify pixel values + expected_shape = torch.Size([1, 3, 800, 1066]) + self.assertEqual(encoding["pixel_values"].shape, expected_shape) + + expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) + self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)) + + # verify area + expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) + self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area)) + # verify boxes + expected_boxes_shape = torch.Size([6, 4]) + self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) + expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) + self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)) + # verify image_id + expected_image_id = torch.tensor([39769]) + self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)) + # verify is_crowd + expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) + self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)) + # verify class_labels + expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93]) + self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)) + # verify masks + expected_masks_sum = 822338 + self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum) + # verify orig_size + expected_orig_size = torch.tensor([480, 640]) + self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)) + # verify size + expected_size = torch.tensor([800, 1066]) + self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size)) diff --git a/tests/models/detr/test_feature_extraction_detr.py b/tests/models/detr/test_feature_extraction_detr.py index 58bde80fbbb11e..30fae36ed83214 100644 --- a/tests/models/detr/test_feature_extraction_detr.py +++ b/tests/models/detr/test_feature_extraction_detr.py @@ -240,8 +240,12 @@ def test_equivalence_pad_and_create_pixel_mask(self): encoded_images_with_method = feature_extractor_1.pad_and_create_pixel_mask(image_inputs, return_tensors="pt") encoded_images = feature_extractor_2(image_inputs, return_tensors="pt") - assert torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4) - assert torch.allclose(encoded_images_with_method["pixel_mask"], encoded_images["pixel_mask"], atol=1e-4) + self.assertTrue( + torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4) + ) + self.assertTrue( + torch.allclose(encoded_images_with_method["pixel_mask"], encoded_images["pixel_mask"], atol=1e-4) + ) @slow def test_call_pytorch_with_coco_detection_annotations(self): @@ -261,31 +265,31 @@ def test_call_pytorch_with_coco_detection_annotations(self): self.assertEqual(encoding["pixel_values"].shape, expected_shape) expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) - assert torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4) + self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)) # verify area expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) - assert torch.allclose(encoding["labels"][0]["area"], expected_area) + self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area)) # verify boxes expected_boxes_shape = torch.Size([6, 4]) self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) - assert torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3) + self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)) # verify image_id expected_image_id = torch.tensor([39769]) - assert torch.allclose(encoding["labels"][0]["image_id"], expected_image_id) + self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)) # verify is_crowd expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) - assert torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd) + self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)) # verify class_labels expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17]) - assert torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels) + self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)) # verify orig_size expected_orig_size = torch.tensor([480, 640]) - assert torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size) + self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)) # verify size expected_size = torch.tensor([800, 1066]) - assert torch.allclose(encoding["labels"][0]["size"], expected_size) + self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size)) @slow def test_call_pytorch_with_coco_panoptic_annotations(self): @@ -299,8 +303,7 @@ def test_call_pytorch_with_coco_panoptic_annotations(self): masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic") # encode them - # TODO replace by .from_pretrained facebook/detr-resnet-50-panoptic - feature_extractor = DetrFeatureExtractor(format="coco_panoptic") + feature_extractor = DetrFeatureExtractor.from_pretrained("facebook/detr-resnet-50-panoptic") encoding = feature_extractor(images=image, annotations=target, masks_path=masks_path, return_tensors="pt") # verify pixel values @@ -308,31 +311,31 @@ def test_call_pytorch_with_coco_panoptic_annotations(self): self.assertEqual(encoding["pixel_values"].shape, expected_shape) expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) - assert torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4) + self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)) # verify area expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) - assert torch.allclose(encoding["labels"][0]["area"], expected_area) + self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area)) # verify boxes expected_boxes_shape = torch.Size([6, 4]) self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) - assert torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3) + self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)) # verify image_id expected_image_id = torch.tensor([39769]) - assert torch.allclose(encoding["labels"][0]["image_id"], expected_image_id) + self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)) # verify is_crowd expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) - assert torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd) + self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)) # verify class_labels expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93]) - assert torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels) + self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)) # verify masks expected_masks_sum = 822338 self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum) # verify orig_size expected_orig_size = torch.tensor([480, 640]) - assert torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size) + self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)) # verify size expected_size = torch.tensor([800, 1066]) - assert torch.allclose(encoding["labels"][0]["size"], expected_size) + self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size)) diff --git a/tests/models/yolos/test_feature_extraction_yolos.py b/tests/models/yolos/test_feature_extraction_yolos.py index 8a576a583a9af0..1c3805e8cdaccd 100644 --- a/tests/models/yolos/test_feature_extraction_yolos.py +++ b/tests/models/yolos/test_feature_extraction_yolos.py @@ -240,7 +240,9 @@ def test_equivalence_padding(self): encoded_images_with_method = feature_extractor_1.pad(image_inputs, return_tensors="pt") encoded_images = feature_extractor_2(image_inputs, return_tensors="pt") - assert torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4) + self.assertTrue( + torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4) + ) @slow def test_call_pytorch_with_coco_detection_annotations(self): @@ -260,31 +262,31 @@ def test_call_pytorch_with_coco_detection_annotations(self): self.assertEqual(encoding["pixel_values"].shape, expected_shape) expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) - assert torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4) + self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)) # verify area expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438]) - assert torch.allclose(encoding["labels"][0]["area"], expected_area) + self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area)) # verify boxes expected_boxes_shape = torch.Size([6, 4]) self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215]) - assert torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3) + self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)) # verify image_id expected_image_id = torch.tensor([39769]) - assert torch.allclose(encoding["labels"][0]["image_id"], expected_image_id) + self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)) # verify is_crowd expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) - assert torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd) + self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)) # verify class_labels expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17]) - assert torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels) + self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)) # verify orig_size expected_orig_size = torch.tensor([480, 640]) - assert torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size) + self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)) # verify size expected_size = torch.tensor([800, 1066]) - assert torch.allclose(encoding["labels"][0]["size"], expected_size) + self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size)) @slow def test_call_pytorch_with_coco_panoptic_annotations(self): @@ -306,31 +308,31 @@ def test_call_pytorch_with_coco_panoptic_annotations(self): self.assertEqual(encoding["pixel_values"].shape, expected_shape) expected_slice = torch.tensor([0.2796, 0.3138, 0.3481]) - assert torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4) + self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4)) # verify area expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147]) - assert torch.allclose(encoding["labels"][0]["area"], expected_area) + self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area)) # verify boxes expected_boxes_shape = torch.Size([6, 4]) self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape) expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625]) - assert torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3) + self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3)) # verify image_id expected_image_id = torch.tensor([39769]) - assert torch.allclose(encoding["labels"][0]["image_id"], expected_image_id) + self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id)) # verify is_crowd expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0]) - assert torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd) + self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd)) # verify class_labels expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93]) - assert torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels) + self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels)) # verify masks expected_masks_sum = 822338 self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum) # verify orig_size expected_orig_size = torch.tensor([480, 640]) - assert torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size) + self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size)) # verify size expected_size = torch.tensor([800, 1066]) - assert torch.allclose(encoding["labels"][0]["size"], expected_size) + self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size)) From 4d0f8c05f5acc277176985a3b93d283d3867f0fd Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Thu, 22 Sep 2022 13:14:39 +0200 Subject: [PATCH 363/539] Add `accelerate` support for ViLT (#18683) --- src/transformers/models/vilt/modeling_vilt.py | 3 ++- src/transformers/testing_utils.py | 4 ---- tests/models/vilt/test_modeling_vilt.py | 2 -- tests/test_modeling_common.py | 10 ++++++++++ 4 files changed, 12 insertions(+), 7 deletions(-) diff --git a/src/transformers/models/vilt/modeling_vilt.py b/src/transformers/models/vilt/modeling_vilt.py index eefa8f641ff187..020aa9a6afc647 100755 --- a/src/transformers/models/vilt/modeling_vilt.py +++ b/src/transformers/models/vilt/modeling_vilt.py @@ -491,7 +491,7 @@ def forward(self, hidden_states, attention_mask=None, head_mask=None, output_att outputs = self_attention_outputs[1:] # add self attentions if we output attention weights # first residual connection - hidden_states = attention_output + hidden_states + hidden_states = attention_output + hidden_states.to(attention_output.device) # in ViLT, layernorm is also applied after self-attention layer_output = self.layernorm_after(hidden_states) @@ -573,6 +573,7 @@ class ViltPreTrainedModel(PreTrainedModel): config_class = ViltConfig base_model_prefix = "vilt" supports_gradient_checkpointing = True + _no_split_modules = ["ViltSelfAttention"] def _init_weights(self, module): """Initialize the weights""" diff --git a/src/transformers/testing_utils.py b/src/transformers/testing_utils.py index 2e99a76232c27c..b14ed5d589c593 100644 --- a/src/transformers/testing_utils.py +++ b/src/transformers/testing_utils.py @@ -772,7 +772,6 @@ class CaptureStd: ```""" def __init__(self, out=True, err=True, replay=True): - self.replay = replay if out: @@ -1122,7 +1121,6 @@ def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None): tmp_dir(`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir """ if tmp_dir is not None: - # defining the most likely desired behavior for when a custom path is provided. # this most likely indicates the debug mode where we want an easily locatable dir that: # 1. gets cleared out before the test (if it already exists) @@ -1200,7 +1198,6 @@ def python_one_liner_max_rss(self, one_liner_str): return max_rss def tearDown(self): - # get_auto_remove_tmp_dir feature: remove registered temp dirs for path in self.teardown_tmp_dirs: shutil.rmtree(path, ignore_errors=True) @@ -1472,7 +1469,6 @@ def tee(line, sink, pipe, label=""): def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput: - loop = asyncio.get_event_loop() result = loop.run_until_complete( _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo) diff --git a/tests/models/vilt/test_modeling_vilt.py b/tests/models/vilt/test_modeling_vilt.py index 82aa0767470eba..280eff70d979af 100644 --- a/tests/models/vilt/test_modeling_vilt.py +++ b/tests/models/vilt/test_modeling_vilt.py @@ -215,7 +215,6 @@ def prepare_pixel_values(self): @require_torch class ViltModelTest(ModelTesterMixin, unittest.TestCase): - all_model_classes = ( ( ViltModel, @@ -512,7 +511,6 @@ def test_model_from_pretrained(self): @require_torch class ViltForImagesAndTextClassificationModelTest(ViltModelTest, unittest.TestCase): - all_model_classes = (ViltForImagesAndTextClassification,) if is_torch_available() else () def setUp(self): diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index 082f2a8a9057f9..42ecad03c6aee9 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -2307,6 +2307,7 @@ def test_disk_offload(self): inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config).eval() model = model.to(torch_device) + torch.manual_seed(0) base_output = model(**inputs_dict) model_size = compute_module_sizes(model)[""] @@ -2324,6 +2325,7 @@ def test_disk_offload(self): ) self.check_device_map_is_respected(new_model, new_model.hf_device_map) + torch.manual_seed(0) new_output = new_model(**inputs_dict) self.assertTrue(torch.allclose(base_output[0], new_output[0])) @@ -2340,6 +2342,8 @@ def test_cpu_offload(self): inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config).eval() model = model.to(torch_device) + + torch.manual_seed(0) base_output = model(**inputs_dict) model_size = compute_module_sizes(model)[""] @@ -2355,6 +2359,8 @@ def test_cpu_offload(self): self.assertSetEqual(set(new_model.hf_device_map.values()), {0, "cpu"}) self.check_device_map_is_respected(new_model, new_model.hf_device_map) + + torch.manual_seed(0) new_output = new_model(**inputs_dict) self.assertTrue(torch.allclose(base_output[0], new_output[0])) @@ -2371,6 +2377,8 @@ def test_model_parallelism(self): inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config).eval() model = model.to(torch_device) + + torch.manual_seed(0) base_output = model(**inputs_dict) model_size = compute_module_sizes(model)[""] @@ -2386,6 +2394,8 @@ def test_model_parallelism(self): self.assertSetEqual(set(new_model.hf_device_map.values()), {0, 1}) self.check_device_map_is_respected(new_model, new_model.hf_device_map) + + torch.manual_seed(0) new_output = new_model(**inputs_dict) self.assertTrue(torch.allclose(base_output[0], new_output[0])) From 2d9853b22622e9c5017241a14cda415d6bca13a2 Mon Sep 17 00:00:00 2001 From: Sayak Paul Date: Thu, 22 Sep 2022 16:45:03 +0530 Subject: [PATCH 364/539] MSN (Masked Siamese Networks) for ViT (#18815) * feat: modeling and conversion scripts for msn. * chore: change license year. * chore: remove unneeded modules. * feat: direct loading of state_dict from remote url. * fix: import paths. * add: rest of the files. * add and fix rest of the files. Co-authored-by: Niels * chore: formatting. * code quality fix. * chore: remove pooler. * feat: add classification top. * fix: configuration object. * add: initial test cases (one failing). * fix: basemodeloutput. * add: caution on using the classification head. * add: rest of the model related files. * add: vit msn readme. * fix: copied from statement. * fix: dummy objects. * add: ViTMSNPreTrainedModel to inits. * fix: repo consistency. * minor change in the model doc. * fix: tests. * Empty-Commit * Update src/transformers/models/vit_msn/configuration_vit_msn.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * address PR comments. * Update src/transformers/models/vit_msn/modeling_vit_msn.py Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> * chore: put model in no_grad() and formatting. Co-authored-by: Niels Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> --- README.md | 1 + README_ko.md | 1 + README_zh-hans.md | 1 + README_zh-hant.md | 1 + docs/source/en/_toctree.yml | 2 + docs/source/en/index.mdx | 2 + docs/source/en/model_doc/vit_msn.mdx | 64 ++ src/transformers/__init__.py | 16 + src/transformers/models/__init__.py | 1 + .../models/auto/configuration_auto.py | 3 + .../models/auto/feature_extraction_auto.py | 1 + src/transformers/models/auto/modeling_auto.py | 2 + src/transformers/models/vit_msn/__init__.py | 57 ++ .../models/vit_msn/configuration_vit_msn.py | 117 +++ .../models/vit_msn/convert_msn_to_pytorch.py | 236 ++++++ .../models/vit_msn/modeling_vit_msn.py | 695 ++++++++++++++++++ src/transformers/utils/dummy_pt_objects.py | 24 + tests/models/vit_msn/__init__.py | 0 tests/models/vit_msn/test_modeling_vit_msn.py | 239 ++++++ utils/documentation_tests.txt | 1 + 20 files changed, 1464 insertions(+) create mode 100644 docs/source/en/model_doc/vit_msn.mdx create mode 100644 src/transformers/models/vit_msn/__init__.py create mode 100644 src/transformers/models/vit_msn/configuration_vit_msn.py create mode 100644 src/transformers/models/vit_msn/convert_msn_to_pytorch.py create mode 100644 src/transformers/models/vit_msn/modeling_vit_msn.py create mode 100644 tests/models/vit_msn/__init__.py create mode 100644 tests/models/vit_msn/test_modeling_vit_msn.py diff --git a/README.md b/README.md index ec8a0fd2e8b392..5e17e33b204cc1 100644 --- a/README.md +++ b/README.md @@ -385,6 +385,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby. 1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang. 1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick. +1. **[ViTMSN](https://huggingface.co/docs/transformers/main/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas. 1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli. 1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino. 1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli. diff --git a/README_ko.md b/README_ko.md index 8a2df2fcbc8887..f53075ff5fe6f9 100644 --- a/README_ko.md +++ b/README_ko.md @@ -335,6 +335,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby. 1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang. 1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick. +1. **[ViTMSN](https://huggingface.co/docs/transformers/main/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas. 1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli. 1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino. 1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli. diff --git a/README_zh-hans.md b/README_zh-hans.md index 88611a5f672bd0..2843a8eb29a08c 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -359,6 +359,7 @@ conda install -c huggingface transformers 1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (来自 Google AI) 伴随论文 [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) 由 Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby 发布。 1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (来自 UCLA NLP) 伴随论文 [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) 由 Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang 发布。 1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (来自 Meta AI) 伴随论文 [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) 由 Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick 发布。 +1. **[ViTMSN](https://huggingface.co/docs/transformers/main/model_doc/vit_msn)** (来自 Meta AI) 伴随论文 [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas 发布. 1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (来自 Facebook AI) 伴随论文 [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) 由 Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli 发布。 1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (来自 Facebook AI) 伴随论文 [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) 由 Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino 发布。 1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (来自 Facebook AI) 伴随论文 [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) 由 Qiantong Xu, Alexei Baevski, Michael Auli 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index b84d3ea8ca974b..8f74b97e98549f 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -371,6 +371,7 @@ conda install -c huggingface transformers 1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby. 1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang. 1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick. +1. **[ViTMSN](https://huggingface.co/docs/transformers/main/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas. 1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli. 1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino. 1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli. diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index a4cd1005e3e83f..223c5d2a6998fc 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -408,6 +408,8 @@ title: Vision Transformer (ViT) - local: model_doc/vit_mae title: ViTMAE + - local: model_doc/vit_msn + title: ViTMSN - local: model_doc/yolos title: YOLOS title: Vision models diff --git a/docs/source/en/index.mdx b/docs/source/en/index.mdx index 40685a5c2fa8da..e6a3d912b27437 100644 --- a/docs/source/en/index.mdx +++ b/docs/source/en/index.mdx @@ -175,6 +175,7 @@ The documentation is organized into five sections: 1. **[Vision Transformer (ViT)](model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby. 1. **[VisualBERT](model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang. 1. **[ViTMAE](model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick. +1. **[ViTMSN](model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas. 1. **[Wav2Vec2](model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli. 1. **[Wav2Vec2-Conformer](model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino. 1. **[Wav2Vec2Phoneme](model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli. @@ -318,6 +319,7 @@ Flax), PyTorch, and/or TensorFlow. | VisualBERT | ❌ | ❌ | ✅ | ❌ | ❌ | | ViT | ❌ | ❌ | ✅ | ✅ | ✅ | | ViTMAE | ❌ | ❌ | ✅ | ✅ | ❌ | +| ViTMSN | ❌ | ❌ | ✅ | ❌ | ❌ | | Wav2Vec2 | ✅ | ❌ | ✅ | ✅ | ✅ | | Wav2Vec2-Conformer | ❌ | ❌ | ✅ | ❌ | ❌ | | WavLM | ❌ | ❌ | ✅ | ❌ | ❌ | diff --git a/docs/source/en/model_doc/vit_msn.mdx b/docs/source/en/model_doc/vit_msn.mdx new file mode 100644 index 00000000000000..07faed51e6cb57 --- /dev/null +++ b/docs/source/en/model_doc/vit_msn.mdx @@ -0,0 +1,64 @@ + + +# ViTMSN + +## Overview + +The ViTMSN model was proposed in [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, +Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas. The paper presents a joint-embedding architecture to match the prototypes +of masked patches with that of the unmasked patches. With this setup, their method yields excellent performance in the low-shot and extreme low-shot +regimes. + +The abstract from the paper is the following: + +*We propose Masked Siamese Networks (MSN), a self-supervised learning framework for learning image representations. Our +approach matches the representation of an image view containing randomly masked patches to the representation of the original +unmasked image. This self-supervised pre-training strategy is particularly scalable when applied to Vision Transformers since only the +unmasked patches are processed by the network. As a result, MSNs improve the scalability of joint-embedding architectures, +while producing representations of a high semantic level that perform competitively on low-shot image classification. For instance, +on ImageNet-1K, with only 5,000 annotated images, our base MSN model achieves 72.4% top-1 accuracy, +and with 1% of ImageNet-1K labels, we achieve 75.7% top-1 accuracy, setting a new state-of-the-art for self-supervised learning on this benchmark.* + +Tips: + +- MSN (masked siamese networks) is a method for self-supervised pre-training of Vision Transformers (ViTs). The pre-training +objective is to match the prototypes assigned to the unmasked views of the images to that of the masked views of the same images. +- The authors have only released pre-trained weights of the backbone (ImageNet-1k pre-training). So, to use that on your own image classification dataset, +use the [`ViTMSNForImageClassification`] class which is initialized from [`ViTMSNModel`]. Follow +[this notebook](https://github.com/huggingface/notebooks/blob/main/examples/image_classification.ipynb) for a detailed tutorial on fine-tuning. +- MSN is particularly useful in the low-shot and extreme low-shot regimes. Notably, it achieves 75.7% top-1 accuracy with only 1% of ImageNet-1K +labels when fine-tuned. + + +drawing + + MSN architecture. Taken from the original paper. + +This model was contributed by [sayakpaul](https://huggingface.co/sayakpaul). The original code can be found [here](https://github.com/facebookresearch/msn). + + +## ViTMSNConfig + +[[autodoc]] ViTMSNConfig + + +## ViTMSNModel + +[[autodoc]] ViTMSNModel + - forward + + +## ViTMSNForImageClassification + +[[autodoc]] ViTMSNForImageClassification + - forward diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 123c56bdd53267..50fb4d2c0b8a7e 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -359,6 +359,7 @@ "models.visual_bert": ["VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VisualBertConfig"], "models.vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig"], "models.vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"], + "models.vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"], "models.wav2vec2": [ "WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config", @@ -1966,6 +1967,14 @@ "ViTMAEPreTrainedModel", ] ) + _import_structure["models.vit_msn"].extend( + [ + "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST", + "ViTMSNModel", + "ViTMSNForImageClassification", + "ViTMSNPreTrainedModel", + ] + ) _import_structure["models.videomae"].extend( [ "VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST", @@ -3251,6 +3260,7 @@ from .models.visual_bert import VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, VisualBertConfig from .models.vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig from .models.vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig + from .models.vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig from .models.wav2vec2 import ( WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config, @@ -4586,6 +4596,12 @@ ViTMAEModel, ViTMAEPreTrainedModel, ) + from .models.vit_msn import ( + VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, + ViTMSNForImageClassification, + ViTMSNModel, + ViTMSNPreTrainedModel, + ) from .models.wav2vec2 import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2ForAudioFrameClassification, diff --git a/src/transformers/models/__init__.py b/src/transformers/models/__init__.py index 8a6622a9f35b6f..18c21cdf1865b3 100644 --- a/src/transformers/models/__init__.py +++ b/src/transformers/models/__init__.py @@ -150,6 +150,7 @@ visual_bert, vit, vit_mae, + vit_msn, wav2vec2, wav2vec2_conformer, wav2vec2_phoneme, diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index dce73cb3903656..39c48b217ff53f 100644 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -145,6 +145,7 @@ ("visual_bert", "VisualBertConfig"), ("vit", "ViTConfig"), ("vit_mae", "ViTMAEConfig"), + ("vit_msn", "ViTMSNConfig"), ("wav2vec2", "Wav2Vec2Config"), ("wav2vec2-conformer", "Wav2Vec2ConformerConfig"), ("wavlm", "WavLMConfig"), @@ -266,6 +267,7 @@ ("visual_bert", "VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("vit", "VIT_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("vit_mae", "VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("vit_msn", "VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("wav2vec2", "WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("wav2vec2-conformer", "WAV2VEC2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("xclip", "X_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"), @@ -418,6 +420,7 @@ ("visual_bert", "VisualBERT"), ("vit", "ViT"), ("vit_mae", "ViTMAE"), + ("vit_msn", "ViTMSN"), ("wav2vec2", "Wav2Vec2"), ("wav2vec2-conformer", "Wav2Vec2-Conformer"), ("wav2vec2_phoneme", "Wav2Vec2Phoneme"), diff --git a/src/transformers/models/auto/feature_extraction_auto.py b/src/transformers/models/auto/feature_extraction_auto.py index 3f2265875f2a69..73fe1ad42ad195 100644 --- a/src/transformers/models/auto/feature_extraction_auto.py +++ b/src/transformers/models/auto/feature_extraction_auto.py @@ -74,6 +74,7 @@ ("vilt", "ViltFeatureExtractor"), ("vit", "ViTFeatureExtractor"), ("vit_mae", "ViTFeatureExtractor"), + ("vit_msn", "ViTFeatureExtractor"), ("wav2vec2", "Wav2Vec2FeatureExtractor"), ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"), ("xclip", "CLIPFeatureExtractor"), diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index d7c5f1772f13bd..936e9c8bdc479c 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -139,6 +139,7 @@ ("visual_bert", "VisualBertModel"), ("vit", "ViTModel"), ("vit_mae", "ViTMAEModel"), + ("vit_msn", "ViTMSNModel"), ("wav2vec2", "Wav2Vec2Model"), ("wav2vec2-conformer", "Wav2Vec2ConformerModel"), ("wavlm", "WavLMModel"), @@ -367,6 +368,7 @@ ("swinv2", "Swinv2ForImageClassification"), ("van", "VanForImageClassification"), ("vit", "ViTForImageClassification"), + ("vit_msn", "ViTMSNForImageClassification"), ] ) diff --git a/src/transformers/models/vit_msn/__init__.py b/src/transformers/models/vit_msn/__init__.py new file mode 100644 index 00000000000000..832e730c5881c6 --- /dev/null +++ b/src/transformers/models/vit_msn/__init__.py @@ -0,0 +1,57 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available + + +_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_vit_msn"] = [ + "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST", + "ViTMSNModel", + "ViTMSNForImageClassification", + "ViTMSNPreTrainedModel", + ] + +if TYPE_CHECKING: + from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_vit_msn import ( + VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, + ViTMSNForImageClassification, + ViTMSNModel, + ViTMSNPreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/src/transformers/models/vit_msn/configuration_vit_msn.py b/src/transformers/models/vit_msn/configuration_vit_msn.py new file mode 100644 index 00000000000000..057824e5d4e133 --- /dev/null +++ b/src/transformers/models/vit_msn/configuration_vit_msn.py @@ -0,0 +1,117 @@ +# coding=utf-8 +# Copyright 2022 Facebook AI and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" ViT MSN model configuration""" + + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + +VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json", + # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn +} + + +class ViTMSNConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`ViTMSNModel`]. It is used to instantiate an ViT + MSN model according to the specified arguments, defining the model architecture. Instantiating a configuration with + the defaults will yield a similar configuration to that of the ViT + [facebook/vit_msn_base](https://huggingface.co/facebook/vit_msn_base) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-06): + The epsilon used by the layer normalization layers. + image_size (`int`, *optional*, defaults to 224): + The size (resolution) of each image. + patch_size (`int`, *optional*, defaults to 16): + The size (resolution) of each patch. + num_channels (`int`, *optional*, defaults to 3): + The number of input channels. + qkv_bias (`bool`, *optional*, defaults to `True`): + Whether to add a bias to the queries, keys and values. + + Example: + + ```python + >>> from transformers import ViTMSNModel, ViTMSNConfig + + >>> # Initializing a ViT MSN vit-msn-base style configuration + >>> configuration = ViTConfig() + + >>> # Initializing a model from the vit-msn-base style configuration + >>> model = ViTMSNModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + model_type = "vit_msn" + + def __init__( + self, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.0, + attention_probs_dropout_prob=0.0, + initializer_range=0.02, + layer_norm_eps=1e-06, + image_size=224, + patch_size=16, + num_channels=3, + qkv_bias=True, + **kwargs + ): + super().__init__(**kwargs) + + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.qkv_bias = qkv_bias diff --git a/src/transformers/models/vit_msn/convert_msn_to_pytorch.py b/src/transformers/models/vit_msn/convert_msn_to_pytorch.py new file mode 100644 index 00000000000000..535f5f742d631f --- /dev/null +++ b/src/transformers/models/vit_msn/convert_msn_to_pytorch.py @@ -0,0 +1,236 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Convert ViT MSN checkpoints from the original repository: https://github.com/facebookresearch/msn""" + +import argparse + +import torch +from PIL import Image + +import requests +from transformers import ViTFeatureExtractor, ViTMSNConfig, ViTMSNModel +from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD + + +torch.set_grad_enabled(False) + + +# here we list all keys to be renamed (original name on the left, our name on the right) +def create_rename_keys(config, base_model=False): + rename_keys = [] + for i in range(config.num_hidden_layers): + # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms + rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight")) + rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias")) + rename_keys.append( + (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") + ) + rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias")) + rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight")) + rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias")) + rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight")) + rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias")) + rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight")) + rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias")) + + # projection layer + position embeddings + rename_keys.extend( + [ + ("module.cls_token", "vit.embeddings.cls_token"), + ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), + ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), + ("module.pos_embed", "vit.embeddings.position_embeddings"), + ] + ) + + if base_model: + # layernorm + pooler + rename_keys.extend( + [ + ("module.norm.weight", "layernorm.weight"), + ("module.norm.bias", "layernorm.bias"), + ] + ) + + # if just the base model, we should remove "vit" from all keys that start with "vit" + rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys] + else: + # layernorm + classification head + rename_keys.extend( + [ + ("norm.weight", "vit.layernorm.weight"), + ("norm.bias", "vit.layernorm.bias"), + ("head.weight", "classifier.weight"), + ("head.bias", "classifier.bias"), + ] + ) + + return rename_keys + + +# we split up the matrix of each encoder layer into queries, keys and values +def read_in_q_k_v(state_dict, config, base_model=False): + for i in range(config.num_hidden_layers): + if base_model: + prefix = "" + else: + prefix = "vit." + # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) + in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight") + in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias") + # next, add query, keys and values (in that order) to the state dict + state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[ + : config.hidden_size, : + ] + state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size] + state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[ + config.hidden_size : config.hidden_size * 2, : + ] + state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[ + config.hidden_size : config.hidden_size * 2 + ] + state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[ + -config.hidden_size :, : + ] + state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :] + + +def remove_classification_head_(state_dict): + ignore_keys = ["head.weight", "head.bias"] + for k in ignore_keys: + state_dict.pop(k, None) + + +def remove_projection_head(state_dict): + # projection head is used in the self-supervised pre-training in MSN, + # for downstream task it's not needed. + ignore_keys = [ + "module.fc.fc1.weight", + "module.fc.fc1.bias", + "module.fc.bn1.weight", + "module.fc.bn1.bias", + "module.fc.bn1.running_mean", + "module.fc.bn1.running_var", + "module.fc.bn1.num_batches_tracked", + "module.fc.fc2.weight", + "module.fc.fc2.bias", + "module.fc.bn2.weight", + "module.fc.bn2.bias", + "module.fc.bn2.running_mean", + "module.fc.bn2.running_var", + "module.fc.bn2.num_batches_tracked", + "module.fc.fc3.weight", + "module.fc.fc3.bias", + ] + for k in ignore_keys: + state_dict.pop(k, None) + + +def rename_key(dct, old, new): + val = dct.pop(old) + dct[new] = val + + +def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path): + config = ViTMSNConfig() + config.num_labels = 1000 + + if "s16" in checkpoint_url: + config.hidden_size = 384 + config.intermediate_size = 1536 + config.num_attention_heads = 6 + elif "l16" in checkpoint_url: + config.hidden_size = 1024 + config.intermediate_size = 4096 + config.num_hidden_layers = 24 + config.num_attention_heads = 16 + config.hidden_dropout_prob = 0.1 + elif "b4" in checkpoint_url: + config.patch_size = 4 + elif "l7" in checkpoint_url: + config.patch_size = 7 + config.hidden_size = 1024 + config.intermediate_size = 4096 + config.num_hidden_layers = 24 + config.num_attention_heads = 16 + config.hidden_dropout_prob = 0.1 + + model = ViTMSNModel(config) + + state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"] + + feature_extractor = ViTFeatureExtractor(size=config.image_size) + + remove_projection_head(state_dict) + rename_keys = create_rename_keys(config, base_model=True) + + for src, dest in rename_keys: + rename_key(state_dict, src, dest) + read_in_q_k_v(state_dict, config, base_model=True) + + model.load_state_dict(state_dict) + model.eval() + + url = "http://images.cocodataset.org/val2017/000000039769.jpg" + + image = Image.open(requests.get(url, stream=True).raw) + feature_extractor = ViTFeatureExtractor( + size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD + ) + inputs = feature_extractor(images=image, return_tensors="pt") + + # forward pass + torch.manual_seed(2) + outputs = model(**inputs) + last_hidden_state = outputs.last_hidden_state + + # The following Colab Notebook was used to generate these outputs: + # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb + if "s16" in checkpoint_url: + expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]]) + elif "b16" in checkpoint_url: + expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]]) + elif "l16" in checkpoint_url: + expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]]) + elif "b4" in checkpoint_url: + expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]]) + else: + expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]]) + + # verify logits + assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4) + + print(f"Saving model to {pytorch_dump_folder_path}") + model.save_pretrained(pytorch_dump_folder_path) + + print(f"Saving feature extractor to {pytorch_dump_folder_path}") + feature_extractor.save_pretrained(pytorch_dump_folder_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--checkpoint_url", + default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar", + type=str, + help="URL of the checkpoint you'd like to convert.", + ) + parser.add_argument( + "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." + ) + + args = parser.parse_args() + convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) diff --git a/src/transformers/models/vit_msn/modeling_vit_msn.py b/src/transformers/models/vit_msn/modeling_vit_msn.py new file mode 100644 index 00000000000000..314b3dbd5bfdbd --- /dev/null +++ b/src/transformers/models/vit_msn/modeling_vit_msn.py @@ -0,0 +1,695 @@ +# coding=utf-8 +# Copyright 2022 Facebook AI and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch ViT MSN (masked siamese network) model.""" + + +import collections.abc +import math +from typing import Dict, List, Optional, Set, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN +from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer +from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings +from .configuration_vit_msn import ViTMSNConfig + + +logger = logging.get_logger(__name__) + + +_CONFIG_FOR_DOC = "ViTMSNConfig" +_CHECKPOINT_FOR_DOC = "sayakpaul/vit-msn-small" +VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "sayakpaul/vit-msn-small", + # See all ViTMSN models at https://huggingface.co/models?filter=vit_msn +] + + +class ViTMSNEmbeddings(nn.Module): + """ + Construct the CLS token, position and patch embeddings. Optionally, also the mask token. + """ + + def __init__(self, config: ViTMSNConfig, use_mask_token: bool = False) -> None: + super().__init__() + + self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) + self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None + self.patch_embeddings = ViTMSNPatchEmbeddings(config) + num_patches = self.patch_embeddings.num_patches + self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size)) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.config = config + + def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: + """ + This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher + resolution images. + + Source: + https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174 + """ + + num_patches = embeddings.shape[1] - 1 + num_positions = self.position_embeddings.shape[1] - 1 + if num_patches == num_positions and height == width: + return self.position_embeddings + class_pos_embed = self.position_embeddings[:, 0] + patch_pos_embed = self.position_embeddings[:, 1:] + dim = embeddings.shape[-1] + patch_window_height = height // self.config.patch_size + patch_window_width = width // self.config.patch_size + # we add a small number to avoid floating point error in the interpolation + # see discussion at https://github.com/facebookresearch/dino/issues/8 + patch_window_height, patch_window_width = patch_window_height + 0.1, patch_window_width + 0.1 + patch_pos_embed = patch_pos_embed.reshape(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim) + patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) + patch_pos_embed = nn.functional.interpolate( + patch_pos_embed, + scale_factor=( + patch_window_height / math.sqrt(num_positions), + patch_window_width / math.sqrt(num_positions), + ), + mode="bicubic", + align_corners=False, + ) + patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) + return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1) + + def forward( + self, + pixel_values: torch.Tensor, + bool_masked_pos: Optional[torch.BoolTensor] = None, + interpolate_pos_encoding: bool = False, + ) -> torch.Tensor: + batch_size, num_channels, height, width = pixel_values.shape + embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) + + if bool_masked_pos is not None: + seq_length = embeddings.shape[1] + mask_tokens = self.mask_token.expand(batch_size, seq_length, -1) + # replace the masked visual tokens by mask_tokens + mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens) + embeddings = embeddings * (1.0 - mask) + mask_tokens * mask + + # add the [CLS] token to the embedded patch tokens + cls_tokens = self.cls_token.expand(batch_size, -1, -1) + embeddings = torch.cat((cls_tokens, embeddings), dim=1) + + # add positional encoding to each token + if interpolate_pos_encoding: + embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) + else: + embeddings = embeddings + self.position_embeddings + + embeddings = self.dropout(embeddings) + + return embeddings + + +# Copied from transformers.models.vit.modeling_vit.ViTPatchEmbeddings with ViT->ViTMSN +class ViTMSNPatchEmbeddings(nn.Module): + """ + This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial + `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a + Transformer. + """ + + def __init__(self, config): + super().__init__() + image_size, patch_size = config.image_size, config.patch_size + num_channels, hidden_size = config.num_channels, config.hidden_size + + image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) + patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) + num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.num_patches = num_patches + + self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size) + + def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor: + batch_size, num_channels, height, width = pixel_values.shape + if num_channels != self.num_channels: + raise ValueError( + "Make sure that the channel dimension of the pixel values match with the one set in the configuration." + ) + if not interpolate_pos_encoding: + if height != self.image_size[0] or width != self.image_size[1]: + raise ValueError( + f"Input image size ({height}*{width}) doesn't match model" + f" ({self.image_size[0]}*{self.image_size[1]})." + ) + embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2) + return embeddings + + +# Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->ViTMSN +class ViTMSNSelfAttention(nn.Module): + def __init__(self, config: ViTMSNConfig) -> None: + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + f"The hidden size {config.hidden_size,} is not a multiple of the number of attention " + f"heads {config.num_attention_heads}." + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) + self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) + self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + + def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False + ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: + mixed_query_layer = self.query(hidden_states) + + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + query_layer = self.transpose_for_scores(mixed_query_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + + # Normalize the attention scores to probabilities. + attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + return outputs + + +# Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->ViTMSN +class ViTMSNSelfOutput(nn.Module): + """ + The residual connection is defined in ViTMSNLayer instead of here (as is the case with other models), due to the + layernorm applied before each block. + """ + + def __init__(self, config: ViTMSNConfig) -> None: + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + + return hidden_states + + +# Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->ViTMSN +class ViTMSNAttention(nn.Module): + def __init__(self, config: ViTMSNConfig) -> None: + super().__init__() + self.attention = ViTMSNSelfAttention(config) + self.output = ViTMSNSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads: Set[int]) -> None: + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.attention.query = prune_linear_layer(self.attention.query, index) + self.attention.key = prune_linear_layer(self.attention.key, index) + self.attention.value = prune_linear_layer(self.attention.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads) + self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states: torch.Tensor, + head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: + self_outputs = self.attention(hidden_states, head_mask, output_attentions) + + attention_output = self.output(self_outputs[0], hidden_states) + + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +# Copied from transformers.models.vit.modeling_vit.ViTIntermediate with ViT->ViTMSN +class ViTMSNIntermediate(nn.Module): + def __init__(self, config: ViTMSNConfig) -> None: + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + + return hidden_states + + +# Copied from transformers.models.vit.modeling_vit.ViTOutput with ViT->ViTMSN +class ViTMSNOutput(nn.Module): + def __init__(self, config: ViTMSNConfig) -> None: + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + + hidden_states = hidden_states + input_tensor + + return hidden_states + + +# Copied from transformers.models.vit.modeling_vit.ViTLayer with ViT->ViTMSN +class ViTMSNLayer(nn.Module): + """This corresponds to the Block class in the timm implementation.""" + + def __init__(self, config: ViTMSNConfig) -> None: + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = ViTMSNAttention(config) + self.intermediate = ViTMSNIntermediate(config) + self.output = ViTMSNOutput(config) + self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]: + self_attention_outputs = self.attention( + self.layernorm_before(hidden_states), # in ViTMSN, layernorm is applied before self-attention + head_mask, + output_attentions=output_attentions, + ) + attention_output = self_attention_outputs[0] + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + # first residual connection + hidden_states = attention_output + hidden_states + + # in ViTMSN, layernorm is also applied after self-attention + layer_output = self.layernorm_after(hidden_states) + layer_output = self.intermediate(layer_output) + + # second residual connection is done here + layer_output = self.output(layer_output, hidden_states) + + outputs = (layer_output,) + outputs + + return outputs + + +# Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->ViTMSN +class ViTMSNEncoder(nn.Module): + def __init__(self, config: ViTMSNConfig) -> None: + super().__init__() + self.config = config + self.layer = nn.ModuleList([ViTMSNLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ) -> Union[tuple, BaseModelOutput]: + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + layer_head_mask, + ) + else: + layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + ) + + +class ViTMSNPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = ViTMSNConfig + base_model_prefix = "vit" + main_input_name = "pixel_values" + supports_gradient_checkpointing = True + + # todo: Resort to https://github.com/facebookresearch/msn/blob/main/src/deit.py#L200-#L211 + # when creating pre-training scripts. + def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: + """Initialize the weights""" + if isinstance(module, (nn.Linear, nn.Conv2d)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + def _set_gradient_checkpointing(self, module: ViTMSNEncoder, value: bool = False) -> None: + if isinstance(module, ViTMSNEncoder): + module.gradient_checkpointing = value + + +VIT_MSN_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it + as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`ViTMSNConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +VIT_MSN_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`AutoFeatureExtractor`]. See + [`AutoFeatureExtractor.__call__`] for details. + + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + interpolate_pos_encoding (`bool`, *optional*): + Whether to interpolate the pre-trained position encodings. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare ViTMSN Model outputting raw hidden-states without any specific head on top.", + VIT_MSN_START_DOCSTRING, +) +class ViTMSNModel(ViTMSNPreTrainedModel): + def __init__(self, config: ViTMSNConfig, use_mask_token: bool = False): + super().__init__(config) + self.config = config + + self.embeddings = ViTMSNEmbeddings(config, use_mask_token=use_mask_token) + self.encoder = ViTMSNEncoder(config) + + self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self) -> ViTMSNPatchEmbeddings: + return self.embeddings.patch_embeddings + + def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None: + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(VIT_MSN_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values: Optional[torch.Tensor] = None, + bool_masked_pos: Optional[torch.BoolTensor] = None, + head_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + interpolate_pos_encoding: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + r""" + Returns: + + Examples: + + ```python + >>> from transformers import AutoFeatureExtractor, ViTMSNModel + >>> import torch + >>> from PIL import Image + >>> import requests + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/vit-msn-small") + >>> model = ViTMSNModel.from_pretrained("facebook/vit-msn-small") + >>> inputs = feature_extractor(images=image, return_tensors="pt") + >>> with torch.no_grad(): + ... outputs = model(**inputs) + >>> last_hidden_states = outputs.last_hidden_state + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + embedding_output = self.embeddings( + pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding + ) + + encoder_outputs = self.encoder( + embedding_output, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = encoder_outputs[0] + sequence_output = self.layernorm(sequence_output) + + if not return_dict: + head_outputs = (sequence_output,) + return head_outputs + encoder_outputs[1:] + + return BaseModelOutput( + last_hidden_state=sequence_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + +# Caution: We don't have the weights for the classification head yet. This class +# is here for the users that are interested to fine-tune the base model (ViTMSNModel). +@add_start_docstrings( + """ + ViTMSN Model with an image classification head on top e.g. for ImageNet. + """, + VIT_MSN_START_DOCSTRING, +) +class ViTMSNForImageClassification(ViTMSNPreTrainedModel): + def __init__(self, config: ViTMSNConfig) -> None: + super().__init__(config) + + self.num_labels = config.num_labels + self.vit = ViTMSNModel(config) + + # Classifier head + self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity() + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(VIT_MSN_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + interpolate_pos_encoding: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[tuple, ImageClassifierOutput]: + r""" + Returns: + + Examples: + + ```python + >>> from transformers import AutoFeatureExtractor, ViTMSNForImageClassification + >>> import torch + >>> from PIL import Image + >>> import requests + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/vit-msn-small") + >>> model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small") + + >>> inputs = feature_extractor(images=image, return_tensors="pt") + >>> with torch.no_grad(): + ... logits = model(**inputs).logits + >>> # model predicts one of the 1000 ImageNet classes + >>> predicted_label = logits.argmax(-1).item() + >>> print(model.config.id2label[predicted_label]) + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.vit( + pixel_values, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + interpolate_pos_encoding=interpolate_pos_encoding, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + logits = self.classifier(sequence_output[:, 0, :]) + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return ImageClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index b656cee9c89bdc..e9f1bae358f3ac 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -5150,6 +5150,30 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class ViTMSNForImageClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViTMSNModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class ViTMSNPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/tests/models/vit_msn/__init__.py b/tests/models/vit_msn/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/tests/models/vit_msn/test_modeling_vit_msn.py b/tests/models/vit_msn/test_modeling_vit_msn.py new file mode 100644 index 00000000000000..b858da42b3d4e3 --- /dev/null +++ b/tests/models/vit_msn/test_modeling_vit_msn.py @@ -0,0 +1,239 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Testing suite for the PyTorch ViTMSN model. """ + + +import inspect +import unittest + +from transformers import ViTMSNConfig +from transformers.testing_utils import require_torch, require_vision, slow, torch_device +from transformers.utils import cached_property, is_torch_available, is_vision_available + +from ...test_configuration_common import ConfigTester +from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor + + +if is_torch_available(): + import torch + from torch import nn + + from transformers import ViTMSNForImageClassification, ViTMSNModel + from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST + + +if is_vision_available(): + from PIL import Image + + from transformers import ViTFeatureExtractor + + +class ViTMAEModelTester: + def __init__( + self, + parent, + batch_size=13, + image_size=30, + patch_size=2, + num_channels=3, + is_training=True, + use_labels=True, + hidden_size=32, + num_hidden_layers=5, + num_attention_heads=4, + intermediate_size=37, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + type_sequence_label_size=10, + initializer_range=0.02, + scope=None, + ): + self.parent = parent + self.batch_size = batch_size + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.is_training = is_training + self.use_labels = use_labels + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.type_sequence_label_size = type_sequence_label_size + self.initializer_range = initializer_range + self.scope = scope + + # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) + num_patches = (image_size // patch_size) ** 2 + self.seq_length = num_patches + 1 + + def prepare_config_and_inputs(self): + pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) + + labels = None + if self.use_labels: + labels = ids_tensor([self.batch_size], self.type_sequence_label_size) + + config = self.get_config() + + return config, pixel_values, labels + + def get_config(self): + return ViTMSNConfig( + image_size=self.image_size, + patch_size=self.patch_size, + num_channels=self.num_channels, + hidden_size=self.hidden_size, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + intermediate_size=self.intermediate_size, + hidden_act=self.hidden_act, + hidden_dropout_prob=self.hidden_dropout_prob, + attention_probs_dropout_prob=self.attention_probs_dropout_prob, + initializer_range=self.initializer_range, + ) + + def create_and_check_model(self, config, pixel_values, labels): + model = ViTMSNModel(config=config) + model.to(torch_device) + model.eval() + result = model(pixel_values) + self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) + + def create_and_check_for_image_classification(self, config, pixel_values, labels): + config.num_labels = self.type_sequence_label_size + model = ViTMSNForImageClassification(config) + model.to(torch_device) + model.eval() + result = model(pixel_values, labels=labels) + print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}") + print("Labels: {labels}") + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) + + # test greyscale images + config.num_channels = 1 + model = ViTMSNForImageClassification(config) + model.to(torch_device) + model.eval() + + pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) + result = model(pixel_values) + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + config, pixel_values, labels = config_and_inputs + inputs_dict = {"pixel_values": pixel_values} + return config, inputs_dict + + +@require_torch +class ViTMSNModelTest(ModelTesterMixin, unittest.TestCase): + """ + Here we also overwrite some of the tests of test_modeling_common.py, as ViTMAE does not use input_ids, inputs_embeds, + attention_mask and seq_length. + """ + + all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () + + test_pruning = False + test_torchscript = False + test_resize_embeddings = False + test_head_masking = False + + def setUp(self): + self.model_tester = ViTMAEModelTester(self) + self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37) + + def test_config(self): + self.config_tester.run_common_tests() + + @unittest.skip(reason="ViTMAE does not use inputs_embeds") + def test_inputs_embeds(self): + pass + + def test_model_common_attributes(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) + x = model.get_output_embeddings() + self.assertTrue(x is None or isinstance(x, nn.Linear)) + + def test_forward_signature(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + signature = inspect.signature(model.forward) + # signature.parameters is an OrderedDict => so arg_names order is deterministic + arg_names = [*signature.parameters.keys()] + + expected_arg_names = ["pixel_values"] + self.assertListEqual(arg_names[:1], expected_arg_names) + + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + def test_for_image_classification(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_for_image_classification(*config_and_inputs) + + @slow + def test_model_from_pretrained(self): + for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: + model = ViTMSNModel.from_pretrained(model_name) + self.assertIsNotNone(model) + + +# We will verify our results on an image of cute cats +def prepare_img(): + image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") + return image + + +@require_torch +@require_vision +class ViTMSNModelIntegrationTest(unittest.TestCase): + @cached_property + def default_feature_extractor(self): + return ViTFeatureExtractor.from_pretrained("sayakpaul/vit-msn-small") if is_vision_available() else None + + @slow + def test_inference_image_classification_head(self): + torch.manual_seed(2) + model = ViTMSNForImageClassification.from_pretrained("sayakpaul/vit-msn-small").to(torch_device) + + feature_extractor = self.default_feature_extractor + image = prepare_img() + inputs = feature_extractor(images=image, return_tensors="pt").to(torch_device) + + # forward pass + with torch.no_grad(): + outputs = model(**inputs) + + # verify the logits + expected_shape = torch.Size((1, 1000)) + self.assertEqual(outputs.logits.shape, expected_shape) + + expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device) + + self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) diff --git a/utils/documentation_tests.txt b/utils/documentation_tests.txt index 0a27bdb527fc94..59451aca8d2f8a 100644 --- a/utils/documentation_tests.txt +++ b/utils/documentation_tests.txt @@ -84,6 +84,7 @@ src/transformers/models/vision_encoder_decoder/modeling_vision_encoder_decoder.p src/transformers/models/vit/modeling_vit.py src/transformers/models/vit/modeling_tf_vit.py src/transformers/models/vit_mae/modeling_vit_mae.py +src/transformers/models/vit_msn/modeling_vit_msn.py src/transformers/models/wav2vec2/modeling_wav2vec2.py src/transformers/models/wav2vec2/tokenization_wav2vec2.py src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py From cf6308ef9b1ae3903a007520e8f2a98f15ed7341 Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Thu, 22 Sep 2022 13:21:05 +0200 Subject: [PATCH 365/539] Improve conditional detr docs (#19154) Co-authored-by: Niels Rogge --- docs/source/en/model_doc/conditional_detr.mdx | 4 ++ .../modeling_conditional_detr.py | 48 +++++++++++++------ utils/documentation_tests.txt | 1 + 3 files changed, 39 insertions(+), 14 deletions(-) diff --git a/docs/source/en/model_doc/conditional_detr.mdx b/docs/source/en/model_doc/conditional_detr.mdx index d5846cbfee3270..b6ded59d1c7e11 100644 --- a/docs/source/en/model_doc/conditional_detr.mdx +++ b/docs/source/en/model_doc/conditional_detr.mdx @@ -20,6 +20,10 @@ The abstract from the paper is the following: *The recently-developed DETR approach applies the transformer encoder and decoder architecture to object detection and achieves promising performance. In this paper, we handle the critical issue, slow training convergence, and present a conditional cross-attention mechanism for fast DETR training. Our approach is motivated by that the cross-attention in DETR relies highly on the content embeddings for localizing the four extremities and predicting the box, which increases the need for high-quality content embeddings and thus the training difficulty. Our approach, named conditional DETR, learns a conditional spatial query from the decoder embedding for decoder multi-head cross-attention. The benefit is that through the conditional spatial query, each cross-attention head is able to attend to a band containing a distinct region, e.g., one object extremity or a region inside the object box. This narrows down the spatial range for localizing the distinct regions for object classification and box regression, thus relaxing the dependence on the content embeddings and easing the training. Empirical results show that conditional DETR converges 6.7× faster for the backbones R50 and R101 and 10× faster for stronger backbones DC5-R50 and DC5-R101. Code is available at https://github.com/Atten4Vis/ConditionalDETR.* + + + Conditional DETR shows much faster convergence compared to the original DETR. Taken from the original paper. This model was contributed by [DepuMeng](https://huggingface.co/DepuMeng). The original code can be found [here](https://github.com/Atten4Vis/ConditionalDETR). diff --git a/src/transformers/models/conditional_detr/modeling_conditional_detr.py b/src/transformers/models/conditional_detr/modeling_conditional_detr.py index 79199ce06e4283..626f19010653f0 100644 --- a/src/transformers/models/conditional_detr/modeling_conditional_detr.py +++ b/src/transformers/models/conditional_detr/modeling_conditional_detr.py @@ -1515,15 +1515,15 @@ def forward( Examples: ```python - >>> from transformers import ConditionalDetrFeatureExtractor, ConditionalDetrModel + >>> from transformers import AutoFeatureExtractor, AutoModel >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) - >>> feature_extractor = ConditionalDetrFeatureExtractor.from_pretrained("microsoft/conditional-detr-resnet-50") - >>> model = ConditionalDetrModel.from_pretrained("microsoft/conditional-detr-resnet-50") + >>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/conditional-detr-resnet-50") + >>> model = AutoModel.from_pretrained("microsoft/conditional-detr-resnet-50") >>> # prepare image for the model >>> inputs = feature_extractor(images=image, return_tensors="pt") @@ -1683,21 +1683,36 @@ def forward( Examples: ```python - >>> from transformers import ConditionalDetrFeatureExtractor, ConditionalDetrForObjectDetection + >>> from transformers import AutoFeatureExtractor, AutoModelForObjectDetection >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) - >>> feature_extractor = ConditionalDetrFeatureExtractor.from_pretrained("microsoft/conditional-detr-resnet-50") - >>> model = ConditionalDetrForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50") + >>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/conditional-detr-resnet-50") + >>> model = AutoModelForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50") >>> inputs = feature_extractor(images=image, return_tensors="pt") + >>> outputs = model(**inputs) - >>> # model predicts bounding boxes and corresponding COCO classes - >>> logits = outputs.logits - >>> bboxes = outputs.pred_boxes + + >>> # convert outputs (bounding boxes and class logits) to COCO API + >>> target_sizes = torch.tensor([image.size[::-1]]) + >>> results = feature_extractor.post_process(outputs, target_sizes=target_sizes)[0] + >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): + ... box = [round(i, 2) for i in box.tolist()] + ... # let's only keep detections with score > 0.5 + ... if score > 0.5: + ... print( + ... f"Detected {model.config.id2label[label.item()]} with confidence " + ... f"{round(score.item(), 3)} at location {box}" + ... ) + Detected remote with confidence 0.833 at location [38.31, 72.1, 177.63, 118.45] + Detected cat with confidence 0.831 at location [9.2, 51.38, 321.13, 469.0] + Detected cat with confidence 0.804 at location [340.3, 16.85, 642.93, 370.95] + Detected remote with confidence 0.683 at location [334.48, 73.49, 366.37, 190.01] + Detected couch with confidence 0.535 at location [0.52, 1.19, 640.35, 475.1] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict @@ -1860,16 +1875,21 @@ def forward( >>> import torch >>> import numpy - >>> from transformers import ConditionalDetrFeatureExtractor, ConditionalDetrForSegmentation + >>> from transformers import ( + ... AutoFeatureExtractor, + ... ConditionalDetrConfig, + ... ConditionalDetrForSegmentation, + ... ) >>> from transformers.models.conditional_detr.feature_extraction_conditional_detr import rgb_to_id >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) - >>> feature_extractor = ConditionalDetrFeatureExtractor.from_pretrained( - ... "facebook/conditional_detr-resnet-50-panoptic" - ... ) - >>> model = ConditionalDetrForSegmentation.from_pretrained("facebook/conditional_detr-resnet-50-panoptic") + >>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/conditional-detr-resnet-50") + + >>> # randomly initialize all weights of the model + >>> config = ConditionalDetrConfig() + >>> model = ConditionalDetrForSegmentation(config) >>> # prepare image for the model >>> inputs = feature_extractor(images=image, return_tensors="pt") diff --git a/utils/documentation_tests.txt b/utils/documentation_tests.txt index 59451aca8d2f8a..348cdb7af881aa 100644 --- a/utils/documentation_tests.txt +++ b/utils/documentation_tests.txt @@ -21,6 +21,7 @@ src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py src/transformers/models/big_bird/modeling_big_bird.py src/transformers/models/blenderbot/modeling_blenderbot.py src/transformers/models/blenderbot_small/modeling_blenderbot_small.py +src/transformers/models/conditional_detr/modeling_conditional_detr.py src/transformers/models/convnext/modeling_convnext.py src/transformers/models/ctrl/modeling_ctrl.py src/transformers/models/cvt/modeling_cvt.py From 1b5ab39cf4430253e70570cb9b6bacbb5c595133 Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Thu, 22 Sep 2022 13:21:51 +0100 Subject: [PATCH 366/539] TF: check embeddings range (#19102) --- src/transformers/modeling_tf_utils.py | 1 + .../models/albert/modeling_tf_albert.py | 10 +++++++ .../models/bert/modeling_tf_bert.py | 10 +++++++ .../blenderbot/modeling_tf_blenderbot.py | 20 +++++++++++++ .../modeling_tf_blenderbot_small.py | 20 +++++++++++++ .../models/clip/modeling_tf_clip.py | 10 +++++++ .../models/convbert/modeling_tf_convbert.py | 10 +++++++ .../models/ctrl/modeling_tf_ctrl.py | 10 +++++++ .../models/deberta/modeling_tf_deberta.py | 10 +++++++ .../deberta_v2/modeling_tf_deberta_v2.py | 10 +++++++ .../distilbert/modeling_tf_distilbert.py | 10 +++++++ .../models/electra/modeling_tf_electra.py | 10 +++++++ .../models/flaubert/modeling_tf_flaubert.py | 10 +++++++ .../models/funnel/modeling_tf_funnel.py | 10 +++++++ .../models/gpt2/modeling_tf_gpt2.py | 10 +++++++ .../models/gptj/modeling_tf_gptj.py | 10 +++++++ .../models/layoutlm/modeling_tf_layoutlm.py | 10 +++++++ .../layoutlmv3/modeling_tf_layoutlmv3.py | 10 +++++++ .../models/led/modeling_tf_led.py | 20 +++++++++++++ .../longformer/modeling_tf_longformer.py | 10 +++++++ .../models/lxmert/modeling_tf_lxmert.py | 10 +++++++ .../models/marian/modeling_tf_marian.py | 20 +++++++++++++ .../models/mbart/modeling_tf_mbart.py | 20 +++++++++++++ .../mobilebert/modeling_tf_mobilebert.py | 10 +++++++ .../models/mpnet/modeling_tf_mpnet.py | 10 +++++++ .../models/openai/modeling_tf_openai.py | 20 +++++++++++++ .../models/opt/modeling_tf_opt.py | 10 +++++++ .../models/pegasus/modeling_tf_pegasus.py | 20 +++++++++++++ .../models/rembert/modeling_tf_rembert.py | 10 +++++++ .../models/roberta/modeling_tf_roberta.py | 10 +++++++ .../models/roformer/modeling_tf_roformer.py | 10 +++++++ .../modeling_tf_speech_to_text.py | 10 +++++++ src/transformers/models/t5/modeling_tf_t5.py | 10 +++++++ .../models/xglm/modeling_tf_xglm.py | 10 +++++++ .../models/xlm/modeling_tf_xlm.py | 10 +++++++ .../models/xlnet/modeling_tf_xlnet.py | 10 +++++++ ...tf_{{cookiecutter.lowercase_modelname}}.py | 30 +++++++++++++++++++ 37 files changed, 451 insertions(+) diff --git a/src/transformers/modeling_tf_utils.py b/src/transformers/modeling_tf_utils.py index d06c619cdf6a16..c0f15592866e71 100644 --- a/src/transformers/modeling_tf_utils.py +++ b/src/transformers/modeling_tf_utils.py @@ -3054,6 +3054,7 @@ class TFWrappedEmbeddings: def __init__(self, layer, abs_scope_name=None): self._layer = layer self._abs_scope_name = abs_scope_name + self.vocab_size = self._layer.vocab_size def call(self, inputs, mode="embedding"): if self._abs_scope_name is None: diff --git a/src/transformers/models/albert/modeling_tf_albert.py b/src/transformers/models/albert/modeling_tf_albert.py index 5bef3cc39c00ab..f3f1169554d0ac 100644 --- a/src/transformers/models/albert/modeling_tf_albert.py +++ b/src/transformers/models/albert/modeling_tf_albert.py @@ -190,6 +190,16 @@ def call( raise ValueError("Need to provide either `input_ids` or `input_embeds`.") if input_ids is not None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.vocab_size})" + ), + ) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/bert/modeling_tf_bert.py b/src/transformers/models/bert/modeling_tf_bert.py index b1240b47639de3..12c810d0122c77 100644 --- a/src/transformers/models/bert/modeling_tf_bert.py +++ b/src/transformers/models/bert/modeling_tf_bert.py @@ -200,6 +200,16 @@ def call( raise ValueError("Need to provide either `input_ids` or `input_embeds`.") if input_ids is not None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.vocab_size})" + ), + ) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/blenderbot/modeling_tf_blenderbot.py b/src/transformers/models/blenderbot/modeling_tf_blenderbot.py index c56b30cf83a25e..73a1cc0226fe60 100644 --- a/src/transformers/models/blenderbot/modeling_tf_blenderbot.py +++ b/src/transformers/models/blenderbot/modeling_tf_blenderbot.py @@ -726,6 +726,16 @@ def call( raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.embed_tokens.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.vocab_size})" + ), + ) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) @@ -923,6 +933,16 @@ def call( positions = self.embed_positions(input_shape, position_ids=position_ids) if inputs_embeds is None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.embed_tokens.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.vocab_size})" + ), + ) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale hidden_states = inputs_embeds diff --git a/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py b/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py index 8383dc097295c7..cd8e7351ba1781 100644 --- a/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py +++ b/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py @@ -731,6 +731,16 @@ def call( raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.embed_tokens.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.vocab_size})" + ), + ) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) @@ -921,6 +931,16 @@ def call( past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0 if inputs_embeds is None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.embed_tokens.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.vocab_size})" + ), + ) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] diff --git a/src/transformers/models/clip/modeling_tf_clip.py b/src/transformers/models/clip/modeling_tf_clip.py index df490062101792..6edd09c2e12ec3 100644 --- a/src/transformers/models/clip/modeling_tf_clip.py +++ b/src/transformers/models/clip/modeling_tf_clip.py @@ -241,6 +241,16 @@ def call( raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.vocab_size})" + ), + ) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/convbert/modeling_tf_convbert.py b/src/transformers/models/convbert/modeling_tf_convbert.py index f69e54282d02f1..b778b73c968f83 100644 --- a/src/transformers/models/convbert/modeling_tf_convbert.py +++ b/src/transformers/models/convbert/modeling_tf_convbert.py @@ -126,6 +126,16 @@ def call( raise ValueError("Need to provide either `input_ids` or `input_embeds`.") if input_ids is not None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.vocab_size})" + ), + ) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/ctrl/modeling_tf_ctrl.py b/src/transformers/models/ctrl/modeling_tf_ctrl.py index 0cfcb44f65ef8a..81872b9671c8c0 100644 --- a/src/transformers/models/ctrl/modeling_tf_ctrl.py +++ b/src/transformers/models/ctrl/modeling_tf_ctrl.py @@ -338,6 +338,16 @@ def call( position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) if inputs_embeds is None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.w.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.w.vocab_size})" + ), + ) inputs_embeds = self.w(input_ids, mode="embedding") seq_len = input_shape[-1] mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0) diff --git a/src/transformers/models/deberta/modeling_tf_deberta.py b/src/transformers/models/deberta/modeling_tf_deberta.py index 652389f3b54a29..ff8e41d3abeda5 100644 --- a/src/transformers/models/deberta/modeling_tf_deberta.py +++ b/src/transformers/models/deberta/modeling_tf_deberta.py @@ -783,6 +783,16 @@ def call( raise ValueError("Need to provide either `input_ids` or `input_embeds`.") if input_ids is not None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.vocab_size})" + ), + ) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py b/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py index bb1d55692c485a..3890731b4dbc4d 100644 --- a/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py +++ b/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py @@ -872,6 +872,16 @@ def call( raise ValueError("Need to provide either `input_ids` or `input_embeds`.") if input_ids is not None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.vocab_size})" + ), + ) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/distilbert/modeling_tf_distilbert.py b/src/transformers/models/distilbert/modeling_tf_distilbert.py index bc1df7f41aeb43..07ad3593a24489 100644 --- a/src/transformers/models/distilbert/modeling_tf_distilbert.py +++ b/src/transformers/models/distilbert/modeling_tf_distilbert.py @@ -110,6 +110,16 @@ def call(self, input_ids=None, position_ids=None, inputs_embeds=None, training=F assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.vocab_size})" + ), + ) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/electra/modeling_tf_electra.py b/src/transformers/models/electra/modeling_tf_electra.py index d2426cae9c23b3..8ed75b3bffe697 100644 --- a/src/transformers/models/electra/modeling_tf_electra.py +++ b/src/transformers/models/electra/modeling_tf_electra.py @@ -530,6 +530,16 @@ def call( raise ValueError("Need to provide either `input_ids` or `input_embeds`.") if input_ids is not None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.vocab_size})" + ), + ) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/flaubert/modeling_tf_flaubert.py b/src/transformers/models/flaubert/modeling_tf_flaubert.py index b33e057232485d..3601c1cb9a568d 100644 --- a/src/transformers/models/flaubert/modeling_tf_flaubert.py +++ b/src/transformers/models/flaubert/modeling_tf_flaubert.py @@ -573,6 +573,16 @@ def call( # embeddings if inputs_embeds is None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.embeddings.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.embeddings.vocab_size})" + ), + ) inputs_embeds = self.embeddings(input_ids) tensor = inputs_embeds + tf.gather(self.position_embeddings, position_ids) diff --git a/src/transformers/models/funnel/modeling_tf_funnel.py b/src/transformers/models/funnel/modeling_tf_funnel.py index da71be87d95d18..2ec5debbf0663c 100644 --- a/src/transformers/models/funnel/modeling_tf_funnel.py +++ b/src/transformers/models/funnel/modeling_tf_funnel.py @@ -110,6 +110,16 @@ def call(self, input_ids=None, inputs_embeds=None, training=False): assert not (input_ids is not None and inputs_embeds is not None) if input_ids is not None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.vocab_size})" + ), + ) inputs_embeds = tf.gather(self.weight, input_ids) final_embeddings = self.LayerNorm(inputs=inputs_embeds) diff --git a/src/transformers/models/gpt2/modeling_tf_gpt2.py b/src/transformers/models/gpt2/modeling_tf_gpt2.py index 5b1e21a3c22c1f..cd3f410b6af29d 100644 --- a/src/transformers/models/gpt2/modeling_tf_gpt2.py +++ b/src/transformers/models/gpt2/modeling_tf_gpt2.py @@ -442,6 +442,16 @@ def call( position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) if inputs_embeds is None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.vocab_size})" + ), + ) inputs_embeds = self.wte(input_ids, mode="embedding") position_embeds = tf.gather(self.wpe, position_ids) diff --git a/src/transformers/models/gptj/modeling_tf_gptj.py b/src/transformers/models/gptj/modeling_tf_gptj.py index 67d17cdc05f80e..d7fa53bfe93dfc 100644 --- a/src/transformers/models/gptj/modeling_tf_gptj.py +++ b/src/transformers/models/gptj/modeling_tf_gptj.py @@ -440,6 +440,16 @@ def call( position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) if inputs_embeds is None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.wte.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.wte.vocab_size})" + ), + ) inputs_embeds = self.wte(input_ids, mode="embedding") if token_type_ids is not None: diff --git a/src/transformers/models/layoutlm/modeling_tf_layoutlm.py b/src/transformers/models/layoutlm/modeling_tf_layoutlm.py index 26ae260dd17de0..74edd11009cdbb 100644 --- a/src/transformers/models/layoutlm/modeling_tf_layoutlm.py +++ b/src/transformers/models/layoutlm/modeling_tf_layoutlm.py @@ -141,6 +141,16 @@ def call( assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.vocab_size})" + ), + ) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py b/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py index 6bb3eb54e195b5..c85989913e5f7f 100644 --- a/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py +++ b/src/transformers/models/layoutlmv3/modeling_tf_layoutlmv3.py @@ -240,6 +240,16 @@ def call( token_type_ids = tf.zeros(input_shape, dtype=position_ids.dtype) if inputs_embeds is None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.word_embeddings.input_dim, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.word_embeddings.input_dim})" + ), + ) inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) diff --git a/src/transformers/models/led/modeling_tf_led.py b/src/transformers/models/led/modeling_tf_led.py index 3702f1cca3bf72..0a803212dabf22 100644 --- a/src/transformers/models/led/modeling_tf_led.py +++ b/src/transformers/models/led/modeling_tf_led.py @@ -1737,6 +1737,16 @@ def call( raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.embed_tokens.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.vocab_size})" + ), + ) inputs_embeds = self.embed_tokens(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] @@ -2012,6 +2022,16 @@ def call( positions = self.embed_positions(input_shape, past_key_values_length) if inputs_embeds is None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.embed_tokens.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.vocab_size})" + ), + ) inputs_embeds = self.embed_tokens(input_ids) hidden_states = inputs_embeds diff --git a/src/transformers/models/longformer/modeling_tf_longformer.py b/src/transformers/models/longformer/modeling_tf_longformer.py index 51b8cca0b07d9a..489bb113f9002a 100644 --- a/src/transformers/models/longformer/modeling_tf_longformer.py +++ b/src/transformers/models/longformer/modeling_tf_longformer.py @@ -540,6 +540,16 @@ def call( assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.vocab_size})" + ), + ) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/lxmert/modeling_tf_lxmert.py b/src/transformers/models/lxmert/modeling_tf_lxmert.py index 3b44056a6cede5..8c61a7d868a930 100644 --- a/src/transformers/models/lxmert/modeling_tf_lxmert.py +++ b/src/transformers/models/lxmert/modeling_tf_lxmert.py @@ -227,6 +227,16 @@ def call(self, input_ids=None, token_type_ids=None, inputs_embeds=None, training assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.vocab_size})" + ), + ) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/marian/modeling_tf_marian.py b/src/transformers/models/marian/modeling_tf_marian.py index 580c38f843d300..a88cbe7b7326cd 100644 --- a/src/transformers/models/marian/modeling_tf_marian.py +++ b/src/transformers/models/marian/modeling_tf_marian.py @@ -772,6 +772,16 @@ def call( raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.embed_tokens.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.vocab_size})" + ), + ) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) @@ -967,6 +977,16 @@ def call( positions = self.embed_positions(input_shape, position_ids=position_ids) if inputs_embeds is None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.embed_tokens.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.vocab_size})" + ), + ) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale hidden_states = inputs_embeds diff --git a/src/transformers/models/mbart/modeling_tf_mbart.py b/src/transformers/models/mbart/modeling_tf_mbart.py index ec034a5fa8e470..e870372b7afbfc 100644 --- a/src/transformers/models/mbart/modeling_tf_mbart.py +++ b/src/transformers/models/mbart/modeling_tf_mbart.py @@ -757,6 +757,16 @@ def call( raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.embed_tokens.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.vocab_size})" + ), + ) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) @@ -959,6 +969,16 @@ def call( positions = self.embed_positions(input_shape, position_ids=position_ids) if inputs_embeds is None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.embed_tokens.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.vocab_size})" + ), + ) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale hidden_states = inputs_embeds diff --git a/src/transformers/models/mobilebert/modeling_tf_mobilebert.py b/src/transformers/models/mobilebert/modeling_tf_mobilebert.py index 25fe9327460f45..f6138b7ce5da97 100644 --- a/src/transformers/models/mobilebert/modeling_tf_mobilebert.py +++ b/src/transformers/models/mobilebert/modeling_tf_mobilebert.py @@ -214,6 +214,16 @@ def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_em assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.vocab_size})" + ), + ) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/mpnet/modeling_tf_mpnet.py b/src/transformers/models/mpnet/modeling_tf_mpnet.py index 3ceb1489a9bcdb..5aca1575712287 100644 --- a/src/transformers/models/mpnet/modeling_tf_mpnet.py +++ b/src/transformers/models/mpnet/modeling_tf_mpnet.py @@ -145,6 +145,16 @@ def call(self, input_ids=None, position_ids=None, inputs_embeds=None, training=F assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.vocab_size})" + ), + ) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/openai/modeling_tf_openai.py b/src/transformers/models/openai/modeling_tf_openai.py index 357866228b83a0..74aa798dfabe7f 100644 --- a/src/transformers/models/openai/modeling_tf_openai.py +++ b/src/transformers/models/openai/modeling_tf_openai.py @@ -298,10 +298,30 @@ def call( position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) if inputs_embeds is None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.vocab_size})" + ), + ) inputs_embeds = self.tokens_embed(input_ids, mode="embedding") position_embeds = tf.gather(self.positions_embed, position_ids) if token_type_ids is not None: token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]]) + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + token_type_ids, + tf.cast(self.vocab_size, dtype=token_type_ids.dtype), + message=( + "token_type_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(token_type_ids)} >= {self.vocab_size})" + ), + ) token_type_embeds = self.tokens_embed(token_type_ids, mode="embedding") else: token_type_embeds = 0 diff --git a/src/transformers/models/opt/modeling_tf_opt.py b/src/transformers/models/opt/modeling_tf_opt.py index b523ecceb1078d..43cc9523872ea7 100644 --- a/src/transformers/models/opt/modeling_tf_opt.py +++ b/src/transformers/models/opt/modeling_tf_opt.py @@ -632,6 +632,16 @@ def call( past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0 if inputs_embeds is None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.embed_tokens.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.vocab_size})" + ), + ) inputs_embeds = self.embed_tokens(input_ids) if attention_mask is None: diff --git a/src/transformers/models/pegasus/modeling_tf_pegasus.py b/src/transformers/models/pegasus/modeling_tf_pegasus.py index 97efed9285fc06..a4eeabb41d4e46 100644 --- a/src/transformers/models/pegasus/modeling_tf_pegasus.py +++ b/src/transformers/models/pegasus/modeling_tf_pegasus.py @@ -775,6 +775,16 @@ def call( raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.embed_tokens.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.vocab_size})" + ), + ) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) @@ -973,6 +983,16 @@ def call( positions = self.embed_positions(input_shape, position_ids=position_ids) if inputs_embeds is None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.embed_tokens.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.vocab_size})" + ), + ) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale hidden_states = inputs_embeds diff --git a/src/transformers/models/rembert/modeling_tf_rembert.py b/src/transformers/models/rembert/modeling_tf_rembert.py index be1946f1118be7..ce1c45d479f74d 100644 --- a/src/transformers/models/rembert/modeling_tf_rembert.py +++ b/src/transformers/models/rembert/modeling_tf_rembert.py @@ -124,6 +124,16 @@ def call( assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.vocab_size})" + ), + ) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/roberta/modeling_tf_roberta.py b/src/transformers/models/roberta/modeling_tf_roberta.py index 04f4283b2070dc..4b34cadbf19499 100644 --- a/src/transformers/models/roberta/modeling_tf_roberta.py +++ b/src/transformers/models/roberta/modeling_tf_roberta.py @@ -146,6 +146,16 @@ def call( assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.vocab_size})" + ), + ) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/roformer/modeling_tf_roformer.py b/src/transformers/models/roformer/modeling_tf_roformer.py index 4c526b694133cb..437aca26a00eab 100644 --- a/src/transformers/models/roformer/modeling_tf_roformer.py +++ b/src/transformers/models/roformer/modeling_tf_roformer.py @@ -177,6 +177,16 @@ def call( assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.vocab_size})" + ), + ) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] diff --git a/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py b/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py index e6e1b0facc0116..ef376914482178 100755 --- a/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py +++ b/src/transformers/models/speech_to_text/modeling_tf_speech_to_text.py @@ -1022,6 +1022,16 @@ def call( past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0 if inputs_embeds is None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.embed_tokens.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.vocab_size})" + ), + ) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale else: inputs_embeds = inputs_embeds diff --git a/src/transformers/models/t5/modeling_tf_t5.py b/src/transformers/models/t5/modeling_tf_t5.py index ae39ea26150dba..b83a0fad2ba9bc 100644 --- a/src/transformers/models/t5/modeling_tf_t5.py +++ b/src/transformers/models/t5/modeling_tf_t5.py @@ -681,6 +681,16 @@ def call( if inputs_embeds is None: assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings" + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.embed_tokens.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.vocab_size})" + ), + ) inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape diff --git a/src/transformers/models/xglm/modeling_tf_xglm.py b/src/transformers/models/xglm/modeling_tf_xglm.py index 6dd62d270b8e23..9856b16112bb28 100644 --- a/src/transformers/models/xglm/modeling_tf_xglm.py +++ b/src/transformers/models/xglm/modeling_tf_xglm.py @@ -533,6 +533,16 @@ def call( past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.embed_tokens.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.vocab_size})" + ), + ) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale attention_mask = self._prepare_decoder_attention_mask(attention_mask, input_shape, past_key_values_length) diff --git a/src/transformers/models/xlm/modeling_tf_xlm.py b/src/transformers/models/xlm/modeling_tf_xlm.py index a060986a5704ac..8bc0925c2fd829 100644 --- a/src/transformers/models/xlm/modeling_tf_xlm.py +++ b/src/transformers/models/xlm/modeling_tf_xlm.py @@ -440,6 +440,16 @@ def call( # embeddings if inputs_embeds is None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.embeddings.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.embeddings.vocab_size})" + ), + ) inputs_embeds = self.embeddings(input_ids) tensor = inputs_embeds + tf.gather(self.position_embeddings, position_ids) diff --git a/src/transformers/models/xlnet/modeling_tf_xlnet.py b/src/transformers/models/xlnet/modeling_tf_xlnet.py index 1b079d00be2b43..a55a3c72a5ff66 100644 --- a/src/transformers/models/xlnet/modeling_tf_xlnet.py +++ b/src/transformers/models/xlnet/modeling_tf_xlnet.py @@ -680,6 +680,16 @@ def call( if inputs_embeds is not None: word_emb_k = inputs_embeds else: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.word_embedding.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.word_embedding.vocab_size})" + ), + ) word_emb_k = self.word_embedding(input_ids) output_h = self.dropout(word_emb_k, training=training) if target_mapping is not None: diff --git a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py index 8225105ddf134f..b4ab100fe83959 100644 --- a/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py +++ b/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_tf_{{cookiecutter.lowercase_modelname}}.py @@ -127,6 +127,16 @@ def call( assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.vocab_size})" + ), + ) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] @@ -2305,6 +2315,16 @@ def call( raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.embed_tokens.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.vocab_size})" + ), + ) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) @@ -2494,6 +2514,16 @@ def call( positions = self.embed_positions(input_shape, past_key_values_length) if inputs_embeds is None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.embed_tokens.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.vocab_size})" + ), + ) inputs_embeds = self.embed_tokens(input_ids) hidden_states = inputs_embeds From 83dc6377d0107b462e5d804ffa72d069625bc36b Mon Sep 17 00:00:00 2001 From: Matt Date: Thu, 22 Sep 2022 13:51:27 +0100 Subject: [PATCH 367/539] Reduce LR for TF MLM example test (#19156) --- examples/tensorflow/test_tensorflow_examples.py | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/tensorflow/test_tensorflow_examples.py b/examples/tensorflow/test_tensorflow_examples.py index 9b692ce80cbdd6..f4b383eabe5303 100644 --- a/examples/tensorflow/test_tensorflow_examples.py +++ b/examples/tensorflow/test_tensorflow_examples.py @@ -157,6 +157,7 @@ def test_run_mlm(self): --do_eval --prediction_loss_only --num_train_epochs=1 + --learning_rate=1e-4 """.split() with patch.object(sys, "argv", testargs): From e5b7cff5fe65eac9e54ba88fa3935b3270db0207 Mon Sep 17 00:00:00 2001 From: "Wang, Yi" Date: Thu, 22 Sep 2022 21:20:15 +0800 Subject: [PATCH 368/539] update perf_train_cpu_many doc (#19151) Signed-off-by: Wang, Yi A Signed-off-by: Wang, Yi A --- docs/source/en/perf_train_cpu_many.mdx | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/docs/source/en/perf_train_cpu_many.mdx b/docs/source/en/perf_train_cpu_many.mdx index f4f77965748e3e..9e7bd61467aea6 100644 --- a/docs/source/en/perf_train_cpu_many.mdx +++ b/docs/source/en/perf_train_cpu_many.mdx @@ -38,16 +38,30 @@ where `{pytorch_version}` should be your PyTorch version, for instance 1.12.0. Check more approaches for [oneccl_bind_pt installation](https://github.com/intel/torch-ccl). Versions of oneCCL and PyTorch must match. + + +oneccl_bindings_for_pytorch 1.12.0 prebuilt wheel does not work with PyTorch 1.12.1 (it is for PyTorch 1.12.0) + + + ## Intel® MPI library Use this standards-based MPI implementation to deliver flexible, efficient, scalable cluster messaging on Intel® architecture. This component is part of the Intel® oneAPI HPC Toolkit. -It can be installed via [MPI](https://www.intel.com/content/www/us/en/developer/articles/tool/oneapi-standalone-components.html#mpi). -Please set the environment by following command before using it. +oneccl_bindings_for_pytorch is installed along with the MPI tool set. Need to source the environment before using it. +for Intel® oneCCL 1.12.0 ``` -source /opt/intel/oneapi/setvars.sh +oneccl_bindings_for_pytorch_path=$(python -c "from oneccl_bindings_for_pytorch import cwd; print(cwd)") +source $oneccl_bindings_for_pytorch_path/env/setvars.sh ``` +for Intel® oneCCL whose version < 1.12.0 +``` +torch_ccl_path=$(python -c "import torch; import torch_ccl; import os; print(os.path.abspath(os.path.dirname(torch_ccl.__file__)))") +source $torch_ccl_path/env/setvars.sh +``` + + The following "Usage in Trainer" takes mpirun in Intel® MPI library as an example. From 74a3ea473777ec3ad101ce3cba28d5c615459f1b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 22 Sep 2022 17:01:40 +0200 Subject: [PATCH 369/539] Bump oauthlib in /examples/research_projects/decision_transformer (#19080) Bumps [oauthlib](https://github.com/oauthlib/oauthlib) from 3.2.0 to 3.2.1. - [Release notes](https://github.com/oauthlib/oauthlib/releases) - [Changelog](https://github.com/oauthlib/oauthlib/blob/master/CHANGELOG.rst) - [Commits](https://github.com/oauthlib/oauthlib/compare/v3.2.0...v3.2.1) --- updated-dependencies: - dependency-name: oauthlib dependency-type: direct:production ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .../research_projects/decision_transformer/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/research_projects/decision_transformer/requirements.txt b/examples/research_projects/decision_transformer/requirements.txt index 6add82e1d5446f..1b57c5a660992c 100644 --- a/examples/research_projects/decision_transformer/requirements.txt +++ b/examples/research_projects/decision_transformer/requirements.txt @@ -118,7 +118,7 @@ mypy-extensions==0.4.3 nltk==3.7 numba==0.55.1 numpy==1.22.3 -oauthlib==3.2.0 +oauthlib==3.2.1 onnx==1.11.0 onnxconverter-common==1.9.0 opt-einsum==3.3.0 From 3a396c59b8187cb92c6ea56da6d4c69b4e27cf62 Mon Sep 17 00:00:00 2001 From: Sayak Paul Date: Thu, 22 Sep 2022 20:33:01 +0530 Subject: [PATCH 370/539] fix: ckpt paths. (#19159) --- src/transformers/models/vit_msn/modeling_vit_msn.py | 4 ++-- tests/models/vit_msn/test_modeling_vit_msn.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/transformers/models/vit_msn/modeling_vit_msn.py b/src/transformers/models/vit_msn/modeling_vit_msn.py index 314b3dbd5bfdbd..a190c42caa707b 100644 --- a/src/transformers/models/vit_msn/modeling_vit_msn.py +++ b/src/transformers/models/vit_msn/modeling_vit_msn.py @@ -36,9 +36,9 @@ _CONFIG_FOR_DOC = "ViTMSNConfig" -_CHECKPOINT_FOR_DOC = "sayakpaul/vit-msn-small" +_CHECKPOINT_FOR_DOC = "facebook/vit-msn-small" VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST = [ - "sayakpaul/vit-msn-small", + "facebook/vit-msn-small", # See all ViTMSN models at https://huggingface.co/models?filter=vit_msn ] diff --git a/tests/models/vit_msn/test_modeling_vit_msn.py b/tests/models/vit_msn/test_modeling_vit_msn.py index b858da42b3d4e3..0cba3dad009429 100644 --- a/tests/models/vit_msn/test_modeling_vit_msn.py +++ b/tests/models/vit_msn/test_modeling_vit_msn.py @@ -215,12 +215,12 @@ def prepare_img(): class ViTMSNModelIntegrationTest(unittest.TestCase): @cached_property def default_feature_extractor(self): - return ViTFeatureExtractor.from_pretrained("sayakpaul/vit-msn-small") if is_vision_available() else None + return ViTFeatureExtractor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None @slow def test_inference_image_classification_head(self): torch.manual_seed(2) - model = ViTMSNForImageClassification.from_pretrained("sayakpaul/vit-msn-small").to(torch_device) + model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device) feature_extractor = self.default_feature_extractor image = prepare_img() From 8d59385f124dd1b330cac7eaa7162799870793ec Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Thu, 22 Sep 2022 14:38:32 -0400 Subject: [PATCH 371/539] Fix TrainingArguments documentation (#19162) * Fix TrainingArguments documentation * Fix TFTrainingArguments documentation --- src/transformers/training_args.py | 2 +- src/transformers/training_args_tf.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index 623fa6246701f4..4eb8f47e14ef60 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -119,7 +119,6 @@ class OptimizerNames(ExplicitEnum): @dataclass class TrainingArguments: - framework = "pt" """ TrainingArguments is the subset of the arguments we use in our example scripts **which relate to the training loop itself**. @@ -500,6 +499,7 @@ class TrainingArguments: Whether to use Apple Silicon chip based `mps` device. """ + framework = "pt" output_dir: str = field( metadata={"help": "The output directory where the model predictions and checkpoints will be written."}, ) diff --git a/src/transformers/training_args_tf.py b/src/transformers/training_args_tf.py index fdae51f72d4b56..b3068b211a6d35 100644 --- a/src/transformers/training_args_tf.py +++ b/src/transformers/training_args_tf.py @@ -28,7 +28,6 @@ @dataclass class TFTrainingArguments(TrainingArguments): - framework = "tf" """ TrainingArguments is the subset of the arguments we use in our example scripts **which relate to the training loop itself**. @@ -162,6 +161,7 @@ class TFTrainingArguments(TrainingArguments): Whether to activate the XLA compilation or not. """ + framework = "tf" tpu_name: Optional[str] = field( default=None, metadata={"help": "Name of TPU"}, From 49629e7ba8ef68476e08b671d6fc71288c2f16f1 Mon Sep 17 00:00:00 2001 From: "Wang, Yi" Date: Fri, 23 Sep 2022 21:13:35 +0800 Subject: [PATCH 372/539] fix HPO DDP GPU problem (#19168) Signed-off-by: Wang, Yi A Signed-off-by: Wang, Yi A --- src/transformers/integrations.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/src/transformers/integrations.py b/src/transformers/integrations.py index 787f76672a174a..94815d839e8703 100644 --- a/src/transformers/integrations.py +++ b/src/transformers/integrations.py @@ -23,6 +23,7 @@ import shutil import sys import tempfile +from dataclasses import asdict from pathlib import Path from typing import TYPE_CHECKING, Dict, Optional @@ -195,9 +196,10 @@ def _objective(trial, checkpoint_dir=None): if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: raise RuntimeError("only support DDP optuna HPO for ParallelMode.DISTRIBUTED currently.") torch.distributed.broadcast_object_list(args_main_rank, src=0) - local_rank = trainer.args.local_rank # backup the local_rank info - trainer.args = pickle.loads(bytes(args_main_rank)) - trainer.args.local_rank = local_rank + args = pickle.loads(bytes(args_main_rank)) + for key, value in asdict(args).items(): + if key != "local_rank": + setattr(trainer.args, key, value) trainer.train(resume_from_checkpoint=None) # If there hasn't been any evaluation during the training loop. if getattr(trainer, "objective", None) is None: @@ -429,9 +431,10 @@ def run_hp_search_sigopt(trainer, n_trials: int, direction: str, **kwargs) -> Be if trainer.args.parallel_mode != ParallelMode.DISTRIBUTED: raise RuntimeError("only support DDP Sigopt HPO for ParallelMode.DISTRIBUTED currently.") torch.distributed.broadcast_object_list(args_main_rank, src=0) - local_rank = trainer.args.local_rank # backup the local_rank info - trainer.args = pickle.loads(bytes(args_main_rank)) - trainer.args.local_rank = local_rank + args = pickle.loads(bytes(args_main_rank)) + for key, value in asdict(args).items(): + if key != "local_rank": + setattr(trainer.args, key, value) trainer.train(resume_from_checkpoint=None) # If there hasn't been any evaluation during the training loop. if getattr(trainer, "objective", None) is None: @@ -470,7 +473,6 @@ def run_hp_search_wandb(trainer, n_trials: int, direction: str, **kwargs) -> Bes sweep_config["name"] = name def _objective(): - run = wandb.run if wandb.run else wandb.init() trainer.state.trial_name = run.name run.config.update({"assignments": {}, "metric": metric}) From 905635f5d36d3b2c8407005fa36601f5d99b03bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tim=20Baumg=C3=A4rtner?= Date: Fri, 23 Sep 2022 15:14:53 +0200 Subject: [PATCH 373/539] [WIP] Trainer supporting evaluation on multiple datasets (#19158) * support for multiple eval datasets * support multiple datasets in seq2seq trainer * add documentation * update documentation * make fixup * revert option for multiple compute_metrics * revert option for multiple compute_metrics * revert added empty line --- src/transformers/trainer.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/src/transformers/trainer.py b/src/transformers/trainer.py index c1869ef76f0055..214e7a9789d2c7 100755 --- a/src/transformers/trainer.py +++ b/src/transformers/trainer.py @@ -238,9 +238,10 @@ class Trainer: `torch.Generator` for the randomization that must be identical on all processes (and the Trainer will manually set the seed of this `generator` at each epoch) or have a `set_epoch()` method that internally sets the seed of the RNGs used. - eval_dataset (`torch.utils.data.Dataset`, *optional*): + eval_dataset (Union[`torch.utils.data.Dataset`, Dict[str, `torch.utils.data.Dataset`]), *optional*): The dataset to use for evaluation. If it is a [`~datasets.Dataset`], columns not accepted by the - `model.forward()` method are automatically removed. + `model.forward()` method are automatically removed. If it is a dictionary, it will evaluate on each + dataset prepending the dictionary key to the metric name. tokenizer ([`PreTrainedTokenizerBase`], *optional*): The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs the maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an @@ -2040,7 +2041,15 @@ def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch, ignore_keys_for metrics = None if self.control.should_evaluate: - metrics = self.evaluate(ignore_keys=ignore_keys_for_eval) + if isinstance(self.eval_dataset, dict): + for eval_dataset_name, eval_dataset in self.eval_dataset.items(): + metrics = self.evaluate( + eval_dataset=eval_dataset, + ignore_keys=ignore_keys_for_eval, + metric_key_prefix=f"eval_{eval_dataset_name}", + ) + else: + metrics = self.evaluate(ignore_keys=ignore_keys_for_eval) self._report_to_hp_search(trial, self.state.global_step, metrics) if self.control.should_save: From 7e84723fe4e9a232e5e27dc38aed373c0c7ab94a Mon Sep 17 00:00:00 2001 From: Alara Dirik <8944735+alaradirik@users.noreply.github.com> Date: Fri, 23 Sep 2022 16:24:28 +0300 Subject: [PATCH 374/539] Add semantic segmentation post-processing method to MobileViT (#19105) * add post-processing method for semantic segmentation Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> --- docs/source/en/model_doc/mobilevit.mdx | 1 + .../mobilevit/feature_extraction_mobilevit.py | 50 ++++++++++++++++++- .../mobilevit/test_modeling_mobilevit.py | 24 +++++++++ 3 files changed, 73 insertions(+), 2 deletions(-) diff --git a/docs/source/en/model_doc/mobilevit.mdx b/docs/source/en/model_doc/mobilevit.mdx index 5725bd5ce5835f..e0799d2962f2f7 100644 --- a/docs/source/en/model_doc/mobilevit.mdx +++ b/docs/source/en/model_doc/mobilevit.mdx @@ -66,6 +66,7 @@ This model was contributed by [matthijs](https://huggingface.co/Matthijs). The T [[autodoc]] MobileViTFeatureExtractor - __call__ + - post_process_semantic_segmentation ## MobileViTModel diff --git a/src/transformers/models/mobilevit/feature_extraction_mobilevit.py b/src/transformers/models/mobilevit/feature_extraction_mobilevit.py index 51e022b809c927..75bd6d51bc15ce 100644 --- a/src/transformers/models/mobilevit/feature_extraction_mobilevit.py +++ b/src/transformers/models/mobilevit/feature_extraction_mobilevit.py @@ -14,16 +14,19 @@ # limitations under the License. """Feature extractor class for MobileViT.""" -from typing import Optional, Union +from typing import List, Optional, Tuple, Union import numpy as np from PIL import Image from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...image_utils import ImageFeatureExtractionMixin, ImageInput, is_torch_tensor -from ...utils import TensorType, logging +from ...utils import TensorType, is_torch_available, logging +if is_torch_available(): + import torch + logger = logging.get_logger(__name__) @@ -151,3 +154,46 @@ def __call__( encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) return encoded_inputs + + def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None): + """ + Converts the output of [`MobileViTForSemanticSegmentation`] into semantic segmentation maps. Only supports + PyTorch. + + Args: + outputs ([`MobileViTForSemanticSegmentation`]): + Raw outputs of the model. + target_sizes (`List[Tuple]`, *optional*): + A list of length `batch_size`, where each item is a `Tuple[int, int]` corresponding to the requested + final size (height, width) of each prediction. If left to None, predictions will not be resized. + Returns: + `List[torch.Tensor]`: + A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) + corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each + `torch.Tensor` correspond to a semantic class id. + """ + logits = outputs.logits + + # Resize logits and compute semantic segmentation maps + if target_sizes is not None: + if len(logits) != len(target_sizes): + raise ValueError( + "Make sure that you pass in as many target sizes as the batch dimension of the logits" + ) + + if is_torch_tensor(target_sizes): + target_sizes = target_sizes.numpy() + + semantic_segmentation = [] + + for idx in range(len(logits)): + resized_logits = torch.nn.functional.interpolate( + logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False + ) + semantic_map = resized_logits[0].argmax(dim=0) + semantic_segmentation.append(semantic_map) + else: + semantic_segmentation = logits.argmax(dim=1) + semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] + + return semantic_segmentation diff --git a/tests/models/mobilevit/test_modeling_mobilevit.py b/tests/models/mobilevit/test_modeling_mobilevit.py index 84ffc7b89bc54a..bb86cbc451fe61 100644 --- a/tests/models/mobilevit/test_modeling_mobilevit.py +++ b/tests/models/mobilevit/test_modeling_mobilevit.py @@ -340,3 +340,27 @@ def test_inference_semantic_segmentation(self): ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4)) + + @slow + def test_post_processing_semantic_segmentation(self): + model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small") + model = model.to(torch_device) + + feature_extractor = MobileViTFeatureExtractor.from_pretrained("apple/deeplabv3-mobilevit-xx-small") + + image = prepare_img() + inputs = feature_extractor(images=image, return_tensors="pt").to(torch_device) + + # forward pass + with torch.no_grad(): + outputs = model(**inputs) + + outputs.logits = outputs.logits.detach().cpu() + + segmentation = feature_extractor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)]) + expected_shape = torch.Size((50, 60)) + self.assertEqual(segmentation[0].shape, expected_shape) + + segmentation = feature_extractor.post_process_semantic_segmentation(outputs=outputs) + expected_shape = torch.Size((32, 32)) + self.assertEqual(segmentation[0].shape, expected_shape) From fe01ec343b1ac20fbf18953146c8e81d4d821df1 Mon Sep 17 00:00:00 2001 From: Alara Dirik <8944735+alaradirik@users.noreply.github.com> Date: Fri, 23 Sep 2022 18:49:31 +0300 Subject: [PATCH 375/539] Detr preprocessor fix (#19007) * fix in-place preprocessing of inputs --- .../feature_extraction_conditional_detr.py | 8 ++++++++ .../deformable_detr/feature_extraction_deformable_detr.py | 8 ++++++++ src/transformers/models/detr/feature_extraction_detr.py | 8 ++++++++ 3 files changed, 24 insertions(+) diff --git a/src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py b/src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py index 96b9fa69db04e3..4467f2900bb5ee 100644 --- a/src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py +++ b/src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py @@ -555,6 +555,12 @@ def __call__( if annotations is not None: annotations = [annotations] + # Create deep copies to avoid editing inputs in place + images = [image for image in images] + + if annotations is not None: + annotations = [annotation for annotation in annotations] + # prepare (COCO annotations as a list of Dict -> ConditionalDETR target as a single Dict per image) if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): @@ -587,6 +593,8 @@ def __call__( images = [ self._normalize(image=image, mean=self.image_mean, std=self.image_std)[0] for image in images ] + else: + images = [np.array(image) for image in images] if pad_and_return_pixel_mask: # pad images up to largest image in batch and create pixel_mask diff --git a/src/transformers/models/deformable_detr/feature_extraction_deformable_detr.py b/src/transformers/models/deformable_detr/feature_extraction_deformable_detr.py index 415df84fd196b8..61e7a70d9f1c23 100644 --- a/src/transformers/models/deformable_detr/feature_extraction_deformable_detr.py +++ b/src/transformers/models/deformable_detr/feature_extraction_deformable_detr.py @@ -555,6 +555,12 @@ def __call__( if annotations is not None: annotations = [annotations] + # Create deep copies to avoid editing inputs in place + images = [image for image in images] + + if annotations is not None: + annotations = [annotation for annotation in annotations] + # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image) if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): @@ -587,6 +593,8 @@ def __call__( images = [ self._normalize(image=image, mean=self.image_mean, std=self.image_std)[0] for image in images ] + else: + images = [np.array(image) for image in images] if pad_and_return_pixel_mask: # pad images up to largest image in batch and create pixel_mask diff --git a/src/transformers/models/detr/feature_extraction_detr.py b/src/transformers/models/detr/feature_extraction_detr.py index 4377bd6f8d60e9..2c0c68991be5d1 100644 --- a/src/transformers/models/detr/feature_extraction_detr.py +++ b/src/transformers/models/detr/feature_extraction_detr.py @@ -547,6 +547,12 @@ def __call__( if annotations is not None: annotations = [annotations] + # Create deep copies to avoid editing inputs in place + images = [image for image in images] + + if annotations is not None: + annotations = [annotation for annotation in annotations] + # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image) if annotations is not None: for idx, (image, target) in enumerate(zip(images, annotations)): @@ -579,6 +585,8 @@ def __call__( images = [ self._normalize(image=image, mean=self.image_mean, std=self.image_std)[0] for image in images ] + else: + images = [np.array(image) for image in images] if pad_and_return_pixel_mask: # pad images up to largest image in batch and create pixel_mask From 49bf569830b19132d04125c1387d948dc57663dc Mon Sep 17 00:00:00 2001 From: Steven Anton Date: Fri, 23 Sep 2022 10:19:35 -0700 Subject: [PATCH 376/539] Add doctests to Perceiver examples (#19129) * Fix bug in example and add to tests * Fix failing tests * Check the size of logits * Code style * Try again... * Add expected loss for PerceiverForMaskedLM doctest Co-authored-by: Steven Anton Co-authored-by: ydshieh --- .../models/perceiver/modeling_perceiver.py | 37 ++++++++++++++++++- utils/documentation_tests.txt | 1 + 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/src/transformers/models/perceiver/modeling_perceiver.py b/src/transformers/models/perceiver/modeling_perceiver.py index d069182f06c3c7..5f7856d20200bd 100755 --- a/src/transformers/models/perceiver/modeling_perceiver.py +++ b/src/transformers/models/perceiver/modeling_perceiver.py @@ -801,6 +801,8 @@ def forward( >>> with torch.no_grad(): ... outputs = model(inputs=inputs) >>> logits = outputs.logits + >>> list(logits.shape) + [1, 2] >>> # to train, one can train the model using standard cross-entropy: >>> criterion = torch.nn.CrossEntropyLoss() @@ -810,6 +812,7 @@ def forward( >>> # EXAMPLE 2: using the Perceiver to classify images >>> # - we define an ImagePreprocessor, which can be used to embed images + >>> config = PerceiverConfig(image_size=224) >>> preprocessor = PerceiverImagePreprocessor( ... config, ... prep_type="conv1x1", @@ -844,6 +847,8 @@ def forward( >>> with torch.no_grad(): ... outputs = model(inputs=inputs) >>> logits = outputs.logits + >>> list(logits.shape) + [1, 2] >>> # to train, one can train the model using standard cross-entropy: >>> criterion = torch.nn.CrossEntropyLoss() @@ -1017,7 +1022,12 @@ def forward( >>> outputs = model(**inputs, labels=labels) >>> loss = outputs.loss + >>> round(loss.item(), 2) + 19.87 + >>> logits = outputs.logits + >>> list(logits.shape) + [1, 2048, 262] >>> # inference >>> text = "This is an incomplete sentence where some words are missing." @@ -1030,6 +1040,8 @@ def forward( >>> with torch.no_grad(): ... outputs = model(**encoding) >>> logits = outputs.logits + >>> list(logits.shape) + [1, 2048, 262] >>> masked_tokens_predictions = logits[0, 52:61].argmax(dim=-1).tolist() >>> tokenizer.decode(masked_tokens_predictions) @@ -1128,6 +1140,8 @@ def forward( >>> inputs = tokenizer(text, return_tensors="pt").input_ids >>> outputs = model(inputs=inputs) >>> logits = outputs.logits + >>> list(logits.shape) + [1, 2] ```""" if inputs is not None and input_ids is not None: raise ValueError("You cannot use both `inputs` and `input_ids`") @@ -1265,9 +1279,13 @@ def forward( >>> inputs = feature_extractor(images=image, return_tensors="pt").pixel_values >>> outputs = model(inputs=inputs) >>> logits = outputs.logits + >>> list(logits.shape) + [1, 1000] + >>> # model predicts one of the 1000 ImageNet classes >>> predicted_class_idx = logits.argmax(-1).item() >>> print("Predicted class:", model.config.id2label[predicted_class_idx]) + Predicted class: tabby, tabby cat ```""" if inputs is not None and pixel_values is not None: raise ValueError("You cannot use both `inputs` and `pixel_values`") @@ -1402,9 +1420,13 @@ def forward( >>> inputs = feature_extractor(images=image, return_tensors="pt").pixel_values >>> outputs = model(inputs=inputs) >>> logits = outputs.logits + >>> list(logits.shape) + [1, 1000] + >>> # model predicts one of the 1000 ImageNet classes >>> predicted_class_idx = logits.argmax(-1).item() >>> print("Predicted class:", model.config.id2label[predicted_class_idx]) + Predicted class: tabby, tabby cat ```""" if inputs is not None and pixel_values is not None: raise ValueError("You cannot use both `inputs` and `pixel_values`") @@ -1539,9 +1561,13 @@ def forward( >>> inputs = feature_extractor(images=image, return_tensors="pt").pixel_values >>> outputs = model(inputs=inputs) >>> logits = outputs.logits + >>> list(logits.shape) + [1, 1000] + >>> # model predicts one of the 1000 ImageNet classes >>> predicted_class_idx = logits.argmax(-1).item() >>> print("Predicted class:", model.config.id2label[predicted_class_idx]) + Predicted class: tabby, tabby cat ```""" if inputs is not None and pixel_values is not None: raise ValueError("You cannot use both `inputs` and `pixel_values`") @@ -1689,6 +1715,8 @@ def forward( >>> patches = torch.randn(1, 2, 27, 368, 496) >>> outputs = model(inputs=patches) >>> logits = outputs.logits + >>> list(logits.shape) + [1, 368, 496, 2] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict @@ -1915,6 +1943,14 @@ def forward( >>> outputs = model(inputs=inputs, subsampled_output_points=subsampling) >>> logits = outputs.logits + >>> list(logits["audio"].shape) + [1, 240] + + >>> list(logits["image"].shape) + [1, 6272, 3] + + >>> list(logits["label"].shape) + [1, 700] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict @@ -2925,7 +2961,6 @@ def __init__(self, config: PerceiverConfig, in_channels: int, postproc_type: str self.classifier = nn.Linear(in_channels, config.samples_per_patch) def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, modality_sizes=None) -> torch.Tensor: - logits = self.classifier(inputs) return torch.reshape(logits, [inputs.shape[0], -1]) diff --git a/utils/documentation_tests.txt b/utils/documentation_tests.txt index 348cdb7af881aa..48fc71d6f6b2ef 100644 --- a/utils/documentation_tests.txt +++ b/utils/documentation_tests.txt @@ -58,6 +58,7 @@ src/transformers/models/opt/modeling_opt.py src/transformers/models/opt/modeling_tf_opt.py src/transformers/models/owlvit/modeling_owlvit.py src/transformers/models/pegasus/modeling_pegasus.py +src/transformers/models/perceiver/modeling_perceiver.py src/transformers/models/plbart/modeling_plbart.py src/transformers/models/poolformer/modeling_poolformer.py src/transformers/models/reformer/modeling_reformer.py From 0cea8d5555832cea111288dae18b32ecb16f7e2b Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Fri, 23 Sep 2022 19:23:05 +0200 Subject: [PATCH 377/539] Add offline runners info in the Slack report (#19169) * send slack report for offline runners Co-authored-by: ydshieh --- .github/workflows/check_runner_status.yml | 12 +++++++++++- utils/check_self_hosted_runner.py | 12 +++++++++++- utils/notification_service.py | 13 ++++++++++++- 3 files changed, 34 insertions(+), 3 deletions(-) diff --git a/.github/workflows/check_runner_status.yml b/.github/workflows/check_runner_status.yml index 2b61bfd971b2f7..9bc1616537763d 100644 --- a/.github/workflows/check_runner_status.yml +++ b/.github/workflows/check_runner_status.yml @@ -19,6 +19,8 @@ jobs: check_runner_status: name: Check Runner Status runs-on: ubuntu-latest + outputs: + offline_runners: ${{ steps.set-offline_runners.outputs.offline_runners }} steps: - name: Checkout transformers uses: actions/checkout@v2 @@ -26,7 +28,14 @@ jobs: fetch-depth: 2 - name: Check Runner Status - run: python utils/check_self_hosted_runner.py --target_runners single-gpu-ci-runner-docker,multi-gpu-ci-runner-docker,single-gpu-scheduled-ci-runner-docker,multi-scheduled-scheduled-ci-runner-docker --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }} + run: python utils/check_self_hosted_runner.py --target_runners single-gpu-ci-runner-docker,multi-gpu-ci-runner-docker,single-gpu-scheduled-ci-runner-docker,multi-scheduled-scheduled-ci-runner-docker,single-gpu-doctest-ci-runner-docker --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }} + + - id: set-offline_runners + name: Set output for offline runners + if: ${{ always() }} + run: | + offline_runners=$(python3 -c 'fp = open("offline_runners.txt"); failed = fp.read(); fp.close(); print(failed)') + echo "::set-output name=offline_runners::$offline_runners" send_results: name: Send results to webhook @@ -50,6 +59,7 @@ jobs: CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY }} CI_EVENT: runner status check RUNNER_STATUS: ${{ needs.check_runner_status.result }} + OFFLINE_RUNNERS: ${{ needs.check_runner_status.outputs.offline_runners }} # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`. run: | diff --git a/utils/check_self_hosted_runner.py b/utils/check_self_hosted_runner.py index 47049d9925ee31..f7303366ea7840 100644 --- a/utils/check_self_hosted_runner.py +++ b/utils/check_self_hosted_runner.py @@ -5,6 +5,8 @@ def get_runner_status(target_runners, token): + offline_runners = [] + cmd = ( f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"' " https://api.github.com/repos/huggingface/transformers/actions/runners" @@ -17,7 +19,15 @@ def get_runner_status(target_runners, token): for runner in runners: if runner["name"] in target_runners: if runner["status"] == "offline": - raise ValueError(f"{runner['name']} is offline!") + offline_runners.append(runner) + + # save the result so we can report them on Slack + with open("offline_runners.txt", "w") as fp: + fp.write(json.dumps(offline_runners)) + + if len(offline_runners) > 0: + failed = "\n".join(offline_runners) + raise ValueError(f"The following runners are offline:\n{failed}") if __name__ == "__main__": diff --git a/utils/notification_service.py b/utils/notification_service.py index 5b5fdd46f13f9d..d4b5479aecd504 100644 --- a/utils/notification_service.py +++ b/utils/notification_service.py @@ -397,8 +397,12 @@ def error_out(title, ci_title="", runner_not_available=False, runner_failed=Fals ci_title_block = {"type": "section", "text": {"type": "mrkdwn", "text": ci_title}} blocks.append(ci_title_block) + offline_runners = [] if runner_not_available: text = "💔 CI runners are not available! Tests are not run. 😭" + result = os.environ.get("OFFLINE_RUNNERS") + if result is not None: + offline_runners = json.loads(result) elif runner_failed: text = "💔 CI runners have problems! Tests are not run. 😭" elif setup_failed: @@ -413,11 +417,18 @@ def error_out(title, ci_title="", runner_not_available=False, runner_failed=Fals "text": text, }, } + + text = "" + if len(offline_runners) > 0: + text = "\n • " + "\n • ".join(offline_runners) + text = f"The following runners are offline:\n{text}\n\n" + text += "🙏 Let's fix it ASAP! 🙏" + error_block_2 = { "type": "section", "text": { "type": "plain_text", - "text": "🙏 Let's fix it ASAP! 🙏", + "text": text, }, "accessory": { "type": "button", From ece762443e4269e50cc1a348838e63e4a499575d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tianqi=20Zhang=20=28=E5=BC=A0=E5=A4=A9=E5=90=AF=29?= Date: Sat, 24 Sep 2022 01:52:27 +0800 Subject: [PATCH 378/539] Fix incorrect comments about atten mask for pytorch backend (#18728) * fix incorrect comments about atten mask * typo * Update for CodeGen Co-authored-by: ydshieh --- src/transformers/modeling_utils.py | 2 +- src/transformers/models/canine/modeling_canine.py | 2 +- src/transformers/models/codegen/modeling_codegen.py | 4 ++-- src/transformers/models/ctrl/modeling_ctrl.py | 2 +- .../decision_transformer/modeling_decision_transformer.py | 2 +- src/transformers/models/gpt2/modeling_gpt2.py | 2 +- src/transformers/models/gpt_neo/modeling_gpt_neo.py | 2 +- src/transformers/models/gpt_neox/modeling_gpt_neox.py | 2 +- src/transformers/models/gptj/modeling_gptj.py | 2 +- src/transformers/models/imagegpt/modeling_imagegpt.py | 2 +- src/transformers/models/lxmert/modeling_lxmert.py | 2 +- src/transformers/models/openai/modeling_openai.py | 2 +- 12 files changed, 13 insertions(+), 13 deletions(-) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index 79a8542d8b27a3..bb35bf7c80336e 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -756,7 +756,7 @@ def get_extended_attention_mask( # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and -10000.0 for masked positions. + # positions we want to attend and the dtype's smallest value for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = extended_attention_mask.to(dtype=dtype) # fp16 compatibility diff --git a/src/transformers/models/canine/modeling_canine.py b/src/transformers/models/canine/modeling_canine.py index 3b965bb9f2be7e..f05ba62c05db60 100644 --- a/src/transformers/models/canine/modeling_canine.py +++ b/src/transformers/models/canine/modeling_canine.py @@ -466,7 +466,7 @@ def forward( attention_mask = torch.unsqueeze(attention_mask, dim=1) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and -10000.0 for masked positions. + # positions we want to attend and the dtype's smallest value for masked positions. attention_mask = (1.0 - attention_mask.float()) * torch.finfo(attention_scores.dtype).min # Apply the attention mask (precomputed for all layers in CanineModel forward() function) attention_scores = attention_scores + attention_mask diff --git a/src/transformers/models/codegen/modeling_codegen.py b/src/transformers/models/codegen/modeling_codegen.py index 06581e732cdcfe..6cfb0bb6266073 100644 --- a/src/transformers/models/codegen/modeling_codegen.py +++ b/src/transformers/models/codegen/modeling_codegen.py @@ -518,11 +518,11 @@ def forward( # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and -10000.0 for masked positions. + # positions we want to attend and the dtype's smallest value for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility - attention_mask = (1.0 - attention_mask) * -10000.0 + attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head diff --git a/src/transformers/models/ctrl/modeling_ctrl.py b/src/transformers/models/ctrl/modeling_ctrl.py index c091c201de8ae8..99a3ad5f97a52c 100644 --- a/src/transformers/models/ctrl/modeling_ctrl.py +++ b/src/transformers/models/ctrl/modeling_ctrl.py @@ -431,7 +431,7 @@ def forward( # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and -10000.0 for masked positions. + # positions we want to attend and the dtype's smallest value for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility diff --git a/src/transformers/models/decision_transformer/modeling_decision_transformer.py b/src/transformers/models/decision_transformer/modeling_decision_transformer.py index 723a6139cca1f5..0f4e0e38993628 100755 --- a/src/transformers/models/decision_transformer/modeling_decision_transformer.py +++ b/src/transformers/models/decision_transformer/modeling_decision_transformer.py @@ -571,7 +571,7 @@ def forward( # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and -10000.0 for masked positions. + # positions we want to attend and the dtype's smallest value for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility diff --git a/src/transformers/models/gpt2/modeling_gpt2.py b/src/transformers/models/gpt2/modeling_gpt2.py index 80bca016348e29..109abb215acbd8 100644 --- a/src/transformers/models/gpt2/modeling_gpt2.py +++ b/src/transformers/models/gpt2/modeling_gpt2.py @@ -805,7 +805,7 @@ def forward( # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and -10000.0 for masked positions. + # positions we want to attend and the dtype's smallest value for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility diff --git a/src/transformers/models/gpt_neo/modeling_gpt_neo.py b/src/transformers/models/gpt_neo/modeling_gpt_neo.py index c30db4e347f47c..0d03d227e27af8 100755 --- a/src/transformers/models/gpt_neo/modeling_gpt_neo.py +++ b/src/transformers/models/gpt_neo/modeling_gpt_neo.py @@ -565,7 +565,7 @@ def forward( # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and -10000.0 for masked positions. + # positions we want to attend and the dtype's smallest value for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility diff --git a/src/transformers/models/gpt_neox/modeling_gpt_neox.py b/src/transformers/models/gpt_neox/modeling_gpt_neox.py index a3280137dcf04e..4379ff747b3b1a 100755 --- a/src/transformers/models/gpt_neox/modeling_gpt_neox.py +++ b/src/transformers/models/gpt_neox/modeling_gpt_neox.py @@ -484,7 +484,7 @@ def forward( # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and -10000.0 for masked positions. + # positions we want to attend and the dtype's smallest value for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility diff --git a/src/transformers/models/gptj/modeling_gptj.py b/src/transformers/models/gptj/modeling_gptj.py index cb05902ee42205..c20ebcb77dca64 100755 --- a/src/transformers/models/gptj/modeling_gptj.py +++ b/src/transformers/models/gptj/modeling_gptj.py @@ -606,7 +606,7 @@ def forward( # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and -10000.0 for masked positions. + # positions we want to attend and the dtype's smallest value for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility diff --git a/src/transformers/models/imagegpt/modeling_imagegpt.py b/src/transformers/models/imagegpt/modeling_imagegpt.py index 88bc042b21cc62..4d14fef4f08d59 100755 --- a/src/transformers/models/imagegpt/modeling_imagegpt.py +++ b/src/transformers/models/imagegpt/modeling_imagegpt.py @@ -757,7 +757,7 @@ def forward( # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and -10000.0 for masked positions. + # positions we want to attend and the dtype's smallest value for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility diff --git a/src/transformers/models/lxmert/modeling_lxmert.py b/src/transformers/models/lxmert/modeling_lxmert.py index 6ba852afcb1b65..43ad1e9cc29370 100644 --- a/src/transformers/models/lxmert/modeling_lxmert.py +++ b/src/transformers/models/lxmert/modeling_lxmert.py @@ -955,7 +955,7 @@ def forward( # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and -10000.0 for masked positions. + # positions we want to attend and the dtype's smallest value for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) diff --git a/src/transformers/models/openai/modeling_openai.py b/src/transformers/models/openai/modeling_openai.py index 2bd634abeb1154..6b1d7ef27a7515 100644 --- a/src/transformers/models/openai/modeling_openai.py +++ b/src/transformers/models/openai/modeling_openai.py @@ -475,7 +475,7 @@ def forward( # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for - # positions we want to attend and -10000.0 for masked positions. + # positions we want to attend and the dtype's smallest value for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility From 6395d1227fa709f9577db73e164e0fd3a8ac67f5 Mon Sep 17 00:00:00 2001 From: Fei Wang Date: Sat, 24 Sep 2022 02:35:19 +0800 Subject: [PATCH 379/539] Fixed type hint for pipelines/check_task (#19150) --- src/transformers/pipelines/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/pipelines/__init__.py b/src/transformers/pipelines/__init__.py index fa2b4fb244b7e4..da0f3d4d83fcb2 100755 --- a/src/transformers/pipelines/__init__.py +++ b/src/transformers/pipelines/__init__.py @@ -385,7 +385,7 @@ def get_task(model: str, use_auth_token: Optional[str] = None) -> str: return task -def check_task(task: str) -> Tuple[Dict, Any]: +def check_task(task: str) -> Tuple[str, Dict, Any]: """ Checks an incoming task string, to validate it's correct and return the default Pipeline and Model classes, and default models if they exist. From 5da6afdd8da0a981452fd5c49d8a42908029abdd Mon Sep 17 00:00:00 2001 From: Enze Date: Sat, 24 Sep 2022 02:48:41 +0800 Subject: [PATCH 380/539] Update run_clip.py (#19130) The overwrite_cache parameter is declared twice. --- examples/pytorch/contrastive-image-text/run_clip.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/examples/pytorch/contrastive-image-text/run_clip.py b/examples/pytorch/contrastive-image-text/run_clip.py index 797c1c1fb90d00..1296d0675fa9c6 100644 --- a/examples/pytorch/contrastive-image-text/run_clip.py +++ b/examples/pytorch/contrastive-image-text/run_clip.py @@ -161,9 +161,6 @@ class DataTrainingArguments: overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) - overwrite_cache: bool = field( - default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} - ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, From fa4eeb4fd342cdbad50d1eeacdd7d7d7bc23b080 Mon Sep 17 00:00:00 2001 From: flozi00 Date: Fri, 23 Sep 2022 20:52:09 +0200 Subject: [PATCH 381/539] german training, accelerate and model sharing (#19171) * correct spelling in README * processing * german training * accelerate * german model sharing * build doc * ttf links * casing --- docs/source/de/_toctree.yml | 6 + docs/source/de/accelerate.mdx | 132 ++++++++++ docs/source/de/model_sharing.mdx | 228 +++++++++++++++++ docs/source/de/training.mdx | 427 +++++++++++++++++++++++++++++++ 4 files changed, 793 insertions(+) create mode 100644 docs/source/de/accelerate.mdx create mode 100644 docs/source/de/model_sharing.mdx create mode 100644 docs/source/de/training.mdx diff --git a/docs/source/de/_toctree.yml b/docs/source/de/_toctree.yml index 7028e98ced540f..8b15c2c53e7c7f 100644 --- a/docs/source/de/_toctree.yml +++ b/docs/source/de/_toctree.yml @@ -13,4 +13,10 @@ title: Laden von vortrainierten Instanzen mit einer AutoClass - local: preprocessing title: Vorverarbeiten + - local: training + title: Optimierung eines vortrainierten Modells + - local: accelerate + title: Verteiltes Training mit 🤗 Accelerate + - local: model_sharing + title: Ein Modell teilen title: Tutorials diff --git a/docs/source/de/accelerate.mdx b/docs/source/de/accelerate.mdx new file mode 100644 index 00000000000000..64f85f205f8afb --- /dev/null +++ b/docs/source/de/accelerate.mdx @@ -0,0 +1,132 @@ + + +# Verteiltes Training mit 🤗 Accelerate + +Da die Modelle immer größer werden, hat sich die Parallelität als Strategie zum Trainieren größerer Modelle auf begrenzter Hardware und zur Beschleunigung der Trainingsgeschwindigkeit um mehrere Größenordnungen erwiesen. Bei Hugging Face haben wir die Bibliothek [🤗 Accelerate](https://huggingface.co/docs/accelerate) entwickelt, um Nutzern zu helfen, ein 🤗 Transformers-Modell auf jeder Art von verteiltem Setup zu trainieren, egal ob es sich um mehrere GPUs auf einer Maschine oder mehrere GPUs auf mehreren Maschinen handelt. In diesem Tutorial lernen Sie, wie Sie Ihre native PyTorch-Trainingsschleife anpassen, um das Training in einer verteilten Umgebung zu ermöglichen. + +## Einrichtung + +Beginnen Sie mit der Installation von 🤗 Accelerate: + +```bash +pip install accelerate +``` + +Dann importieren und erstellen Sie ein [`~accelerate.Accelerator`]-Objekt. Der [`~accelerate.Accelerator`] wird automatisch Ihre Art der verteilten Einrichtung erkennen und alle notwendigen Komponenten für das Training initialisieren. Sie müssen Ihr Modell nicht explizit auf einem Gerät platzieren. + +```py +>>> from accelerate import Accelerator + +>>> accelerator = Accelerator() +``` + +## Vorbereiten auf die Beschleunigung + +Der nächste Schritt ist die Übergabe aller relevanten Trainingsobjekte an die Methode [`~accelerate.Accelerator.prepare`]. Dazu gehören Ihre Trainings- und Evaluierungs-DataLoader, ein Modell und ein Optimierer: + +```py +>>> train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( +... train_dataloader, eval_dataloader, model, optimizer +... ) +``` + +## Rückwärts + +Die letzte Ergänzung besteht darin, das typische `loss.backward()` in der Trainingsschleife durch die 🤗 Accelerate-Methode [`~accelerate.Accelerator.backward`] zu ersetzen: + +```py +>>> for epoch in range(num_epochs): +... for batch in train_dataloader: +... outputs = model(**batch) +... loss = outputs.loss +... accelerator.backward(loss) + +... optimizer.step() +... lr_scheduler.step() +... optimizer.zero_grad() +... progress_bar.update(1) +``` + +Wie Sie im folgenden Code sehen können, müssen Sie nur vier zusätzliche Codezeilen zu Ihrer Trainingsschleife hinzufügen, um verteiltes Training zu ermöglichen! + +```diff ++ from accelerate import Accelerator + from transformers import AdamW, AutoModelForSequenceClassification, get_scheduler + ++ accelerator = Accelerator() + + model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2) + optimizer = AdamW(model.parameters(), lr=3e-5) + +- device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") +- model.to(device) + ++ train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( ++ train_dataloader, eval_dataloader, model, optimizer ++ ) + + num_epochs = 3 + num_training_steps = num_epochs * len(train_dataloader) + lr_scheduler = get_scheduler( + "linear", + optimizer=optimizer, + num_warmup_steps=0, + num_training_steps=num_training_steps + ) + + progress_bar = tqdm(range(num_training_steps)) + + model.train() + for epoch in range(num_epochs): + for batch in train_dataloader: +- batch = {k: v.to(device) for k, v in batch.items()} + outputs = model(**batch) + loss = outputs.loss +- loss.backward() ++ accelerator.backward(loss) + + optimizer.step() + lr_scheduler.step() + optimizer.zero_grad() + progress_bar.update(1) +``` + +## Trainieren + +Sobald Sie die entsprechenden Codezeilen hinzugefügt haben, starten Sie Ihr Training in einem Skript oder einem Notebook wie Colaboratory. + +### Trainieren mit einem Skript + +Wenn Sie Ihr Training mit einem Skript durchführen, führen Sie den folgenden Befehl aus, um eine Konfigurationsdatei zu erstellen und zu speichern: + +```bash +accelerate config +``` + +Dann starten Sie Ihr Training mit: + +```bash +accelerate launch train.py +``` + +### Trainieren mit einem Notebook + +🤗 Accelerate kann auch in einem Notebook laufen, wenn Sie planen, die TPUs von Colaboratory zu verwenden. Verpacken Sie den gesamten Code, der für das Training verantwortlich ist, in eine Funktion und übergeben Sie diese an [`~accelerate.notebook_launcher`]: + +```py +>>> from accelerate import notebook_launcher + +>>> notebook_launcher(training_function) +``` + +Weitere Informationen über 🤗 Accelerate und seine umfangreichen Funktionen finden Sie in der [Dokumentation](https://huggingface.co/docs/accelerate). \ No newline at end of file diff --git a/docs/source/de/model_sharing.mdx b/docs/source/de/model_sharing.mdx new file mode 100644 index 00000000000000..50318595ffc207 --- /dev/null +++ b/docs/source/de/model_sharing.mdx @@ -0,0 +1,228 @@ + + +# Ein Modell teilen + +Die letzten beiden Tutorials haben gezeigt, wie man ein Modell mit PyTorch, Keras und 🤗 Accelerate für verteilte Setups feinabstimmen kann. Der nächste Schritt besteht darin, Ihr Modell mit der Community zu teilen! Bei Hugging Face glauben wir an den offenen Austausch von Wissen und Ressourcen, um künstliche Intelligenz für alle zu demokratisieren. Wir ermutigen Sie, Ihr Modell mit der Community zu teilen, um anderen zu helfen, Zeit und Ressourcen zu sparen. + +In diesem Tutorial lernen Sie zwei Methoden kennen, wie Sie ein trainiertes oder verfeinertes Modell auf dem [Model Hub](https://huggingface.co/models) teilen können: + +- Programmgesteuertes Übertragen Ihrer Dateien auf den Hub. +- Ziehen Sie Ihre Dateien per Drag-and-Drop über die Weboberfläche in den Hub. + + + + + +Um ein Modell mit der Öffentlichkeit zu teilen, benötigen Sie ein Konto auf [huggingface.co](https://huggingface.co/join). Sie können auch einer bestehenden Organisation beitreten oder eine neue Organisation gründen. + + + +## Repository-Funktionen + +Jedes Repository im Model Hub verhält sich wie ein typisches GitHub-Repository. Unsere Repositorys bieten Versionierung, Commit-Historie und die Möglichkeit, Unterschiede zu visualisieren. + +Die integrierte Versionierung des Model Hub basiert auf Git und [git-lfs](https://git-lfs.github.com/). Mit anderen Worten: Sie können ein Modell als ein Repository behandeln, was eine bessere Zugriffskontrolle und Skalierbarkeit ermöglicht. Die Versionskontrolle ermöglicht *Revisionen*, eine Methode zum Anheften einer bestimmten Version eines Modells mit einem Commit-Hash, Tag oder Branch. + +Folglich können Sie eine bestimmte Modellversion mit dem Parameter "Revision" laden: + +```py +>>> model = AutoModel.from_pretrained( +... "julien-c/EsperBERTo-small", revision="v2.0.1" # tag name, or branch name, or commit hash +... ) +``` + +Dateien lassen sich auch in einem Repository leicht bearbeiten, und Sie können die Commit-Historie sowie die Unterschiede einsehen: + +![vis_diff](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/vis_diff.png) + +## Einrichtung + +Bevor Sie ein Modell für den Hub freigeben, benötigen Sie Ihre Hugging Face-Anmeldedaten. Wenn Sie Zugang zu einem Terminal haben, führen Sie den folgenden Befehl in der virtuellen Umgebung aus, in der 🤗 Transformers installiert ist. Dadurch werden Ihre Zugangsdaten in Ihrem Hugging Face-Cache-Ordner (standardmäßig `~/.cache/`) gespeichert: + +```bash +huggingface-cli login +``` + +Wenn Sie ein Notebook wie Jupyter oder Colaboratory verwenden, stellen Sie sicher, dass Sie die [`huggingface_hub`](https://huggingface.co/docs/hub/adding-a-library) Bibliothek installiert haben. Diese Bibliothek ermöglicht Ihnen die programmatische Interaktion mit dem Hub. + +```bash +pip install huggingface_hub +``` + +Verwenden Sie dann `notebook_login`, um sich beim Hub anzumelden, und folgen Sie dem Link [hier](https://huggingface.co/settings/token), um ein Token für die Anmeldung zu generieren: + +```py +>>> from huggingface_hub import notebook_login + +>>> notebook_login() +``` + +## Ein Modell für alle Frameworks konvertieren + +Um sicherzustellen, dass Ihr Modell von jemandem verwendet werden kann, der mit einem anderen Framework arbeitet, empfehlen wir Ihnen, Ihr Modell sowohl mit PyTorch- als auch mit TensorFlow-Checkpoints zu konvertieren und hochzuladen. Während Benutzer immer noch in der Lage sind, Ihr Modell von einem anderen Framework zu laden, wenn Sie diesen Schritt überspringen, wird es langsamer sein, weil 🤗 Transformers den Checkpoint on-the-fly konvertieren müssen. + +Die Konvertierung eines Checkpoints für ein anderes Framework ist einfach. Stellen Sie sicher, dass Sie PyTorch und TensorFlow installiert haben (siehe [hier](installation) für Installationsanweisungen), und finden Sie dann das spezifische Modell für Ihre Aufgabe in dem anderen Framework. + + + +Geben Sie `from_tf=True` an, um einen Prüfpunkt von TensorFlow nach PyTorch zu konvertieren: + +```py +>>> pt_model = DistilBertForSequenceClassification.from_pretrained("path/to/awesome-name-you-picked", from_tf=True) +>>> pt_model.save_pretrained("path/to/awesome-name-you-picked") +``` + + +Geben Sie `from_pt=True` an, um einen Prüfpunkt von PyTorch nach TensorFlow zu konvertieren: + +```py +>>> tf_model = TFDistilBertForSequenceClassification.from_pretrained("path/to/awesome-name-you-picked", from_pt=True) +``` + +Dann können Sie Ihr neues TensorFlow-Modell mit seinem neuen Checkpoint speichern: + +```py +>>> tf_model.save_pretrained("path/to/awesome-name-you-picked") +``` + + +Wenn ein Modell in Flax verfügbar ist, können Sie auch einen Kontrollpunkt von PyTorch nach Flax konvertieren: + +```py +>>> flax_model = FlaxDistilBertForSequenceClassification.from_pretrained( +... "path/to/awesome-name-you-picked", from_pt=True +... ) +``` + + + +## Ein Modell während des Trainings hochladen + + + + + +Die Weitergabe eines Modells an den Hub ist so einfach wie das Hinzufügen eines zusätzlichen Parameters oder Rückrufs. Erinnern Sie sich an das [Feinabstimmungs-Tutorial](training), in der Klasse [`TrainingArguments`] geben Sie Hyperparameter und zusätzliche Trainingsoptionen an. Eine dieser Trainingsoptionen beinhaltet die Möglichkeit, ein Modell direkt an den Hub zu pushen. Setzen Sie `push_to_hub=True` in Ihrer [`TrainingArguments`]: + +```py +>>> training_args = TrainingArguments(output_dir="my-awesome-model", push_to_hub=True) +``` + +Übergeben Sie Ihre Trainingsargumente wie gewohnt an [`Trainer`]: + +```py +>>> trainer = Trainer( +... model=model, +... args=training_args, +... train_dataset=small_train_dataset, +... eval_dataset=small_eval_dataset, +... compute_metrics=compute_metrics, +... ) +``` + +Nach der Feinabstimmung Ihres Modells rufen Sie [`~transformers.Trainer.push_to_hub`] auf [`Trainer`] auf, um das trainierte Modell an den Hub zu übertragen. Transformers fügt sogar automatisch Trainings-Hyperparameter, Trainingsergebnisse und Framework-Versionen zu Ihrer Modellkarte hinzu! + +```py +>>> trainer.push_to_hub() +``` + + +Geben Sie ein Modell mit [`PushToHubCallback`] an den Hub weiter. In der [`PushToHubCallback`] Funktion, fügen Sie hinzu: + +- Ein Ausgabeverzeichnis für Ihr Modell. +- Einen Tokenizer. +- Die `hub_model_id`, die Ihr Hub-Benutzername und Modellname ist. + +```py +>>> from transformers.keras.callbacks import PushToHubCallback + +>>> push_to_hub_callback = PushToHubCallback( +... output_dir="./your_model_save_path", tokenizer=tokenizer, hub_model_id="your-username/my-awesome-model" +... ) +``` + +Fügen Sie den Callback zu [`fit`](https://keras.io/api/models/model_training_apis/) hinzu, und 🤗 Transformers wird das trainierte Modell an den Hub weiterleiten: + +```py +>>> model.fit(tf_train_dataset, validation_data=tf_validation_dataset, epochs=3, callbacks=push_to_hub_callback) +``` + + + +## Verwenden Sie die Funktion `push_to_hub`. + +Sie können `push_to_hub` auch direkt für Ihr Modell aufrufen, um es in den Hub hochzuladen. + +Geben Sie den Namen Ihres Modells in "push_to_hub" an: + +```py +>>> pt_model.push_to_hub("my-awesome-model") +``` + +Dadurch wird ein Repository unter Ihrem Benutzernamen mit dem Modellnamen `my-awesome-model` erstellt. Benutzer können nun Ihr Modell mit der Funktion `from_pretrained` laden: + +```py +>>> from transformers import AutoModel + +>>> model = AutoModel.from_pretrained("your_username/my-awesome-model") +``` + +Wenn Sie zu einer Organisation gehören und Ihr Modell stattdessen unter dem Namen der Organisation pushen wollen, fügen Sie diesen einfach zur `repo_id` hinzu: + +```py +>>> pt_model.push_to_hub("my-awesome-org/my-awesome-model") +``` + +Die Funktion "push_to_hub" kann auch verwendet werden, um andere Dateien zu einem Modell-Repository hinzuzufügen. Zum Beispiel kann man einen Tokenizer zu einem Modell-Repository hinzufügen: + +```py +>>> tokenizer.push_to_hub("my-awesome-model") +``` + +Oder vielleicht möchten Sie die TensorFlow-Version Ihres fein abgestimmten PyTorch-Modells hinzufügen: + +```py +>>> tf_model.push_to_hub("my-awesome-model") +``` + +Wenn Sie nun zu Ihrem Hugging Face-Profil navigieren, sollten Sie Ihr neu erstelltes Modell-Repository sehen. Wenn Sie auf die Registerkarte **Dateien** klicken, werden alle Dateien angezeigt, die Sie in das Repository hochgeladen haben. + +Weitere Einzelheiten zum Erstellen und Hochladen von Dateien in ein Repository finden Sie in der Hub-Dokumentation [hier](https://huggingface.co/docs/hub/how-to-upstream). + +## Hochladen mit der Weboberfläche + +Benutzer, die einen no-code Ansatz bevorzugen, können ein Modell über das Webinterface des Hubs hochladen. Besuchen Sie [huggingface.co/new](https://huggingface.co/new) um ein neues Repository zu erstellen: + +![new_model_repo](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/new_model_repo.png) + +Fügen Sie von hier aus einige Informationen über Ihr Modell hinzu: + +- Wählen Sie den **Besitzer** des Repositorys. Dies können Sie selbst oder eine der Organisationen sein, denen Sie angehören. +- Wählen Sie einen Namen für Ihr Modell, der auch der Name des Repositorys sein wird. +- Wählen Sie, ob Ihr Modell öffentlich oder privat ist. +- Geben Sie die Lizenzverwendung für Ihr Modell an. + +Klicken Sie nun auf die Registerkarte **Dateien** und klicken Sie auf die Schaltfläche **Datei hinzufügen**, um eine neue Datei in Ihr Repository hochzuladen. Ziehen Sie dann eine Datei per Drag-and-Drop hoch und fügen Sie eine Übergabemeldung hinzu. + +![upload_file](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/upload_file.png) + +## Hinzufügen einer Modellkarte + +Um sicherzustellen, dass die Benutzer die Fähigkeiten, Grenzen, möglichen Verzerrungen und ethischen Aspekte Ihres Modells verstehen, fügen Sie bitte eine Modellkarte zu Ihrem Repository hinzu. Die Modellkarte wird in der Datei `README.md` definiert. Sie können eine Modellkarte hinzufügen, indem Sie: + +* Manuelles Erstellen und Hochladen einer "README.md"-Datei. +* Klicken Sie auf die Schaltfläche **Modellkarte bearbeiten** in Ihrem Modell-Repository. + +Werfen Sie einen Blick auf die DistilBert [model card](https://huggingface.co/distilbert-base-uncased) als gutes Beispiel für die Art von Informationen, die eine Modellkarte enthalten sollte. Weitere Details über andere Optionen, die Sie in der Datei "README.md" einstellen können, wie z.B. den Kohlenstoff-Fußabdruck eines Modells oder Beispiele für Widgets, finden Sie in der Dokumentation [hier](https://huggingface.co/docs/hub/models-cards). \ No newline at end of file diff --git a/docs/source/de/training.mdx b/docs/source/de/training.mdx new file mode 100644 index 00000000000000..a4b762a34e482c --- /dev/null +++ b/docs/source/de/training.mdx @@ -0,0 +1,427 @@ + + +# Optimierung eines vortrainierten Modells + +[[open-in-colab]] + +Die Verwendung eines vorab trainierten Modells hat erhebliche Vorteile. Es reduziert die Rechenkosten und den CO2-Fußabdruck und ermöglicht Ihnen die Verwendung von Modellen, die dem neuesten Stand der Technik entsprechen, ohne dass Sie ein Modell von Grund auf neu trainieren müssen. Transformers bietet Zugang zu Tausenden von vortrainierten Modellen für eine Vielzahl von Aufgaben. Wenn Sie ein vorab trainiertes Modell verwenden, trainieren Sie es auf einem für Ihre Aufgabe spezifischen Datensatz. Dies wird als Feinabstimmung bezeichnet und ist eine unglaublich leistungsfähige Trainingstechnik. In diesem Tutorial werden Sie ein vortrainiertes Modell mit einem Deep-Learning-Framework Ihrer Wahl feinabstimmen: + +* Feinabstimmung eines vorab trainierten Modells mit 🤗 Transformers [`Trainer`]. +* Feinabstimmung eines vorab trainierten Modells in TensorFlow mit Keras. +* Feinabstimmung eines vorab trainierten Modells in nativem PyTorch. + + + +## Vorbereitung eines Datensatzes + + + +Bevor Sie die Feinabstimmung eines vortrainierten Modells vornehmen können, müssen Sie einen Datensatz herunterladen und für das Training vorbereiten. Im vorangegangenen Leitfaden haben Sie gelernt, wie man Daten für das Training aufbereitet, und jetzt haben Sie die Gelegenheit, diese Fähigkeiten zu testen! + +Laden Sie zunächst den Datensatz [Yelp Reviews](https://huggingface.co/datasets/yelp_review_full): + +```py +>>> from datasets import load_dataset + +>>> dataset = load_dataset("yelp_review_full") +>>> dataset["train"][100] +{'label': 0, + 'text': 'My expectations for McDonalds are t rarely high. But for one to still fail so spectacularly...that takes something special!\\nThe cashier took my friends\'s order, then promptly ignored me. I had to force myself in front of a cashier who opened his register to wait on the person BEHIND me. I waited over five minutes for a gigantic order that included precisely one kid\'s meal. After watching two people who ordered after me be handed their food, I asked where mine was. The manager started yelling at the cashiers for \\"serving off their orders\\" when they didn\'t have their food. But neither cashier was anywhere near those controls, and the manager was the one serving food to customers and clearing the boards.\\nThe manager was rude when giving me my order. She didn\'t make sure that I had everything ON MY RECEIPT, and never even had the decency to apologize that I felt I was getting poor service.\\nI\'ve eaten at various McDonalds restaurants for over 30 years. I\'ve worked at more than one location. I expect bad days, bad moods, and the occasional mistake. But I have yet to have a decent experience at this store. It will remain a place I avoid unless someone in my party needs to avoid illness from low blood sugar. Perhaps I should go back to the racially biased service of Steak n Shake instead!'} +``` + +Wie Sie nun wissen, benötigen Sie einen Tokenizer, um den Text zu verarbeiten und eine Auffüll- und Abschneidungsstrategie einzubauen, um mit variablen Sequenzlängen umzugehen. Um Ihren Datensatz in einem Schritt zu verarbeiten, verwenden Sie die 🤗 Methode Datasets [`map`](https://huggingface.co/docs/datasets/process.html#map), um eine Vorverarbeitungsfunktion auf den gesamten Datensatz anzuwenden: + +```py +>>> from transformers import AutoTokenizer + +>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") + + +>>> def tokenize_function(examples): +... return tokenizer(examples["text"], padding="max_length", truncation=True) + + +>>> tokenized_datasets = dataset.map(tokenize_function, batched=True) +``` + +Wenn Sie möchten, können Sie eine kleinere Teilmenge des gesamten Datensatzes für die Feinabstimmung erstellen, um den Zeitaufwand zu verringern: + +```py +>>> small_train_dataset = tokenized_datasets["train"].shuffle(seed=42).select(range(1000)) +>>> small_eval_dataset = tokenized_datasets["test"].shuffle(seed=42).select(range(1000)) +``` + + + +## Training + +An dieser Stelle sollten Sie dem Abschnitt folgen, der dem Rahmen entspricht, den Sie verwenden möchten. Sie können über die Links +in der rechten Seitenleiste können Sie zu dem gewünschten Abschnitt springen - und wenn Sie den gesamten Inhalt eines bestimmten Frameworks ausblenden möchten, +klicken Sie einfach auf die Schaltfläche oben rechts im Block des jeweiligen Frameworks! + + + + + +## Trainieren mit PyTorch Trainer + +🤗 Transformers bietet eine [`Trainer`]-Klasse, die für das Training von 🤗 Transformers-Modellen optimiert ist und es einfacher macht, mit dem Training zu beginnen, ohne manuell eine eigene Trainingsschleife zu schreiben. Die [`Trainer`]-API unterstützt eine breite Palette von Trainingsoptionen und Funktionen wie Logging, Gradientenakkumulation und gemischte Präzision. + +Beginnen Sie mit dem Laden Ihres Modells und geben Sie die Anzahl der erwarteten Labels an. Aus dem Yelp Review [dataset card](https://huggingface.co/datasets/yelp_review_full#data-fields) wissen Sie, dass es fünf Labels gibt: + +```py +>>> from transformers import AutoModelForSequenceClassification + +>>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=5) +``` + + + +Es wird eine Warnung angezeigt, dass einige der trainierten Parameter nicht verwendet werden und einige Parameter zufällig +initialisiert werden. Machen Sie sich keine Sorgen, das ist völlig normal! Der vorher trainierte Kopf des BERT-Modells wird verworfen und durch einen zufällig initialisierten Klassifikationskopf ersetzt. Sie werden diesen neuen Modellkopf in Ihrer Sequenzklassifizierungsaufgabe feinabstimmen, indem Sie das Wissen des vortrainierten Modells auf ihn übertragen. + + + +### Hyperparameter für das Training + +Als Nächstes erstellen Sie eine Klasse [`TrainingArguments`], die alle Hyperparameter enthält, die Sie einstellen können, sowie Flags zur Aktivierung verschiedener Trainingsoptionen. Für dieses Lernprogramm können Sie mit den Standard- [Hyperparametern](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments) beginnen, aber Sie können mit diesen experimentieren, um Ihre optimalen Einstellungen zu finden. + +Geben Sie an, wo die Kontrollpunkte Ihres Trainings gespeichert werden sollen: + +```py +>>> from transformers import TrainingArguments + +>>> training_args = TrainingArguments(output_dir="test_trainer") +``` + +### Auswerten + +Der [`Trainer`] wertet die Leistung des Modells während des Trainings nicht automatisch aus. Sie müssen [`Trainer`] eine Funktion übergeben, um Metriken zu berechnen und zu berichten. Die [🤗 Evaluate](https://huggingface.co/docs/evaluate/index) Bibliothek bietet eine einfache [`accuracy`](https://huggingface.co/spaces/evaluate-metric/accuracy) Funktion, die Sie mit der [`evaluate.load`] Funktion laden können (siehe diese [quicktour](https://huggingface.co/docs/evaluate/a_quick_tour) für weitere Informationen): + +```py +>>> import numpy as np +>>> import evaluate + +>>> metric = evaluate.load("accuracy") +``` + +Rufen Sie [`~evaluate.compute`] auf `metric` auf, um die Genauigkeit Ihrer Vorhersagen zu berechnen. Bevor Sie Ihre Vorhersagen an `compute` übergeben, müssen Sie die Vorhersagen in Logits umwandeln (denken Sie daran, dass alle 🤗 Transformers-Modelle Logits zurückgeben): + +```py +>>> def compute_metrics(eval_pred): +... logits, labels = eval_pred +... predictions = np.argmax(logits, axis=-1) +... return metric.compute(predictions=predictions, references=labels) +``` + +Wenn Sie Ihre Bewertungsmetriken während der Feinabstimmung überwachen möchten, geben Sie den Parameter `evaluation_strategy` in Ihren Trainingsargumenten an, um die Bewertungsmetrik am Ende jeder Epoche zu ermitteln: + +```py +>>> from transformers import TrainingArguments, Trainer + +>>> training_args = TrainingArguments(output_dir="test_trainer", evaluation_strategy="epoch") +``` + +### Trainer + +Erstellen Sie ein [`Trainer`]-Objekt mit Ihrem Modell, Trainingsargumenten, Trainings- und Testdatensätzen und einer Evaluierungsfunktion: + +```py +>>> trainer = Trainer( +... model=model, +... args=training_args, +... train_dataset=small_train_dataset, +... eval_dataset=small_eval_dataset, +... compute_metrics=compute_metrics, +... ) +``` + +Anschließend können Sie Ihr Modell durch den Aufruf von [`~transformers.Trainer.train`] optimieren: + +```py +>>> trainer.train() +``` + + + + + + +## Trainieren Sie ein TensorFlow-Modell mit Keras + +Sie können auch 🤗 Transformers Modelle in TensorFlow mit der Keras API trainieren! + +### Laden von Daten für Keras + +Wenn Sie ein 🤗 Transformers Modell mit der Keras API trainieren wollen, müssen Sie Ihren Datensatz in ein Format konvertieren, das +Keras versteht. Wenn Ihr Datensatz klein ist, können Sie das Ganze einfach in NumPy-Arrays konvertieren und an Keras übergeben. +Probieren wir das zuerst aus, bevor wir etwas Komplizierteres tun. + +Laden Sie zunächst ein Dataset. Wir werden den CoLA-Datensatz aus dem [GLUE-Benchmark](https://huggingface.co/datasets/glue) verwenden, +da es sich um eine einfache Aufgabe zur Klassifizierung von binärem Text handelt, und nehmen vorerst nur den Trainingssplit. + +```py +from datasets import load_dataset + +dataset = load_dataset("glue", "cola") +dataset = dataset["train"] # Just take the training split for now +``` + +Als nächstes laden Sie einen Tokenizer und tokenisieren die Daten als NumPy-Arrays. Beachten Sie, dass die Beschriftungen bereits eine Liste von 0 und 1en sind, +Wir können sie also ohne Tokenisierung direkt in ein NumPy-Array konvertieren! + +```py +from transformers import AutoTokenizer + +tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") +tokenized_data = tokenizer(dataset["text"], return_tensors="np", padding=True) + +labels = np.array(dataset["label"]) # Label is already an array of 0 and 1 +``` + +Schließlich laden, [`compile`](https://keras.io/api/models/model_training_apis/#compile-method) und [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) Sie das Modell: + +```py +from transformers import TFAutoModelForSequenceClassification +from tensorflow.keras.optimizers import Adam + +# Load and compile our model +model = TFAutoModelForSequenceClassification.from_pretrained("bert-base-cased") +# Lower learning rates are often better for fine-tuning transformers +model.compile(optimizer=Adam(3e-5)) + +model.fit(tokenized_data, labels) +``` + + + +Sie müssen Ihren Modellen kein Verlustargument übergeben, wenn Sie sie `compile()`! Hugging-Face-Modelle wählen automatisch +einen Loss, der für ihre Aufgabe und Modellarchitektur geeignet ist, wenn dieses Argument leer gelassen wird. Sie können jederzeit außer Kraft setzen, indem Sie selbst einen Loss angeben, wenn Sie das möchten! + + + +Dieser Ansatz eignet sich hervorragend für kleinere Datensätze, aber bei größeren Datensätzen kann er zu einem Problem werden. Warum? +Weil das tokenisierte Array und die Beschriftungen vollständig in den Speicher geladen werden müssten, und weil NumPy nicht mit +"gezackte" Arrays nicht verarbeiten kann, so dass jedes tokenisierte Sample auf die Länge des längsten Samples im gesamten Datensatz aufgefüllt werden müsste. +Datensatzes aufgefüllt werden. Dadurch wird das Array noch größer, und all die aufgefüllten Token verlangsamen auch das Training! + +### Laden von Daten als tf.data.Dataset + +Wenn Sie eine Verlangsamung des Trainings vermeiden wollen, können Sie Ihre Daten stattdessen als `tf.data.Dataset` laden. Sie können zwar Ihre eigene +tf.data"-Pipeline schreiben können, wenn Sie wollen, haben wir zwei bequeme Methoden, um dies zu tun: + +- [`~TFPreTrainedModel.prepare_tf_dataset`]: Dies ist die Methode, die wir in den meisten Fällen empfehlen. Da es sich um eine Methode +Ihres Modells ist, kann sie das Modell inspizieren, um automatisch herauszufinden, welche Spalten als Modelleingaben verwendet werden können, und +verwirft die anderen, um einen einfacheren, leistungsfähigeren Datensatz zu erstellen. +- [~datasets.Dataset.to_tf_dataset`]: Diese Methode ist eher auf niedriger Ebene angesiedelt und ist nützlich, wenn Sie genau kontrollieren wollen, wie +Dataset erstellt wird, indem man genau angibt, welche `columns` und `label_cols` einbezogen werden sollen. + +Bevor Sie [~TFPreTrainedModel.prepare_tf_dataset`] verwenden können, müssen Sie die Tokenizer-Ausgaben als Spalten zu Ihrem Datensatz hinzufügen, wie in +dem folgenden Codebeispiel: + +```py +def tokenize_dataset(data): + # Keys of the returned dictionary will be added to the dataset as columns + return tokenizer(data["text"]) + + +dataset = dataset.map(tokenize_dataset) +``` + +Denken Sie daran, dass Hugging Face-Datensätze standardmäßig auf der Festplatte gespeichert werden, so dass dies nicht zu einem erhöhten Arbeitsspeicherbedarf führen wird! Sobald die +Spalten hinzugefügt wurden, können Sie Batches aus dem Datensatz streamen und zu jedem Batch Auffüllungen hinzufügen, was die Anzahl der Auffüllungs-Token im Vergleich zum Auffüllen des gesamten Datensatzes reduziert. + + +```py +>>> tf_dataset = model.prepare_tf_dataset(dataset, batch_size=16, shuffle=True, tokenizer=tokenizer) +``` + +Beachten Sie, dass Sie im obigen Codebeispiel den Tokenizer an `prepare_tf_dataset` übergeben müssen, damit die Stapel beim Laden korrekt aufgefüllt werden können. +Wenn alle Stichproben in Ihrem Datensatz die gleiche Länge haben und kein Auffüllen erforderlich ist, können Sie dieses Argument weglassen. +Wenn Sie etwas Komplexeres als nur das Auffüllen von Stichproben benötigen (z. B. das Korrumpieren von Token für die maskierte Sprachmodellierung), können Sie das Argument +Modellierung), können Sie stattdessen das Argument `collate_fn` verwenden, um eine Funktion zu übergeben, die aufgerufen wird, um die +Liste von Stichproben in einen Stapel umwandelt und alle gewünschten Vorverarbeitungen vornimmt. Siehe unsere +[examples](https://github.com/huggingface/transformers/tree/main/examples) oder +[notebooks](https://huggingface.co/docs/transformers/notebooks), um diesen Ansatz in Aktion zu sehen. + +Sobald Sie einen `tf.data.Dataset` erstellt haben, können Sie das Modell wie zuvor kompilieren und anpassen: + +```py +model.compile(optimizer=Adam(3e-5)) + +model.fit(tf_dataset) +``` + + + + + + +## Trainieren in nativem PyTorch + + + + + +[`Trainer`] kümmert sich um die Trainingsschleife und ermöglicht die Feinabstimmung eines Modells in einer einzigen Codezeile. Für Benutzer, die es vorziehen, ihre eigene Trainingsschleife zu schreiben, können Sie auch eine Feinabstimmung eines 🤗 Transformers-Modells in nativem PyTorch vornehmen. + +An diesem Punkt müssen Sie möglicherweise Ihr Notebook neu starten oder den folgenden Code ausführen, um etwas Speicher freizugeben: + +```py +del model +del pytorch_model +del trainer +torch.cuda.empty_cache() +``` + +Als Nächstes müssen Sie den Datensatz `tokenized_dataset` manuell nachbearbeiten, um ihn für das Training vorzubereiten. + +1. Entfernen Sie die Spalte "Text", da das Modell keinen Rohtext als Eingabe akzeptiert: + + ```py + >>> tokenized_datasets = tokenized_datasets.remove_columns(["text"]) + ``` + +2. Benennen Sie die Spalte "Label" in "Labels" um, da das Modell erwartet, dass das Argument "Labels" genannt wird: + + ```py + >>> tokenized_datasets = tokenized_datasets.rename_column("label", "labels") + ``` + +3. Stellen Sie das Format des Datensatzes so ein, dass PyTorch-Tensoren anstelle von Listen zurückgegeben werden: + + ```py + >>> tokenized_datasets.set_format("torch") + ``` + +Erstellen Sie dann eine kleinere Teilmenge des Datensatzes, wie zuvor gezeigt, um die Feinabstimmung zu beschleunigen: + +```py +>>> small_train_dataset = tokenized_datasets["train"].shuffle(seed=42).select(range(1000)) +>>> small_eval_dataset = tokenized_datasets["test"].shuffle(seed=42).select(range(1000)) +``` + +### DataLoader + +Erstellen Sie einen `DataLoader` für Ihre Trainings- und Testdatensätze, damit Sie über die Datenstapel iterieren können: + +```py +>>> from torch.utils.data import DataLoader + +>>> train_dataloader = DataLoader(small_train_dataset, shuffle=True, batch_size=8) +>>> eval_dataloader = DataLoader(small_eval_dataset, batch_size=8) +``` + +Laden Sie Ihr Modell mit der Anzahl der erwarteten Kennzeichnungen: + +```py +>>> from transformers import AutoModelForSequenceClassification + +>>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=5) +``` + +### Optimierer und Lernratensteuerung + +Erstellen Sie einen Optimierer und einen Scheduler für die Lernrate, um das Modell fein abzustimmen. Wir verwenden den Optimierer [`AdamW`](https://pytorch.org/docs/stable/generated/torch.optim.AdamW.html) aus PyTorch: + +```py +>>> from torch.optim import AdamW + +>>> optimizer = AdamW(model.parameters(), lr=5e-5) +``` + +Erstellen Sie den Standard-Lernratenplaner aus [`Trainer`]: + +```py +>>> from transformers import get_scheduler + +>>> num_epochs = 3 +>>> num_training_steps = num_epochs * len(train_dataloader) +>>> lr_scheduler = get_scheduler( +... name="linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps +... ) +``` + +Geben Sie schließlich `device` an, um einen Grafikprozessor zu verwenden, wenn Sie Zugang zu einem solchen haben. Andernfalls kann das Training auf einer CPU mehrere Stunden statt ein paar Minuten dauern. + +```py +>>> import torch + +>>> device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") +>>> model.to(device) +``` + + + +Holen Sie sich mit einem gehosteten Notebook wie [Colaboratory](https://colab.research.google.com/) oder [SageMaker StudioLab](https://studiolab.sagemaker.aws/) kostenlosen Zugang zu einem Cloud-GPU, wenn Sie noch keinen haben. + + + +Großartig, Sie sind bereit für das Training! 🥳 + +### Trainingsschleife + +Um Ihren Trainingsfortschritt zu verfolgen, verwenden Sie die [tqdm](https://tqdm.github.io/) Bibliothek, um einen Fortschrittsbalken über die Anzahl der Trainingsschritte hinzuzufügen: + +```py +>>> from tqdm.auto import tqdm + +>>> progress_bar = tqdm(range(num_training_steps)) + +>>> model.train() +>>> for epoch in range(num_epochs): +... for batch in train_dataloader: +... batch = {k: v.to(device) for k, v in batch.items()} +... outputs = model(**batch) +... loss = outputs.loss +... loss.backward() + +... optimizer.step() +... lr_scheduler.step() +... optimizer.zero_grad() +... progress_bar.update(1) +``` + +### Auswertung + +Genauso wie Sie eine Bewertungsfunktion zu [`Trainer`] hinzugefügt haben, müssen Sie dasselbe tun, wenn Sie Ihre eigene Trainingsschleife schreiben. Aber anstatt die Metrik am Ende jeder Epoche zu berechnen und zu melden, werden Sie dieses Mal alle Stapel mit [`~evaluate.add_batch`] akkumulieren und die Metrik ganz am Ende berechnen. + +```py +>>> import evaluate + +>>> metric = evaluate.load("accuracy") +>>> model.eval() +>>> for batch in eval_dataloader: +... batch = {k: v.to(device) for k, v in batch.items()} +... with torch.no_grad(): +... outputs = model(**batch) + +... logits = outputs.logits +... predictions = torch.argmax(logits, dim=-1) +... metric.add_batch(predictions=predictions, references=batch["labels"]) + +>>> metric.compute() +``` + + + + + +## Zusätzliche Ressourcen + +Weitere Beispiele für die Feinabstimmung finden Sie unter: + +- [🤗 Transformers Examples](https://github.com/huggingface/transformers/tree/main/examples) enthält Skripte + um gängige NLP-Aufgaben in PyTorch und TensorFlow zu trainieren. + +- [🤗 Transformers Notebooks](notebooks) enthält verschiedene Notebooks zur Feinabstimmung eines Modells für bestimmte Aufgaben in PyTorch und TensorFlow. \ No newline at end of file From 71fc33174664738d8c8d93025ebc810180e69c20 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 26 Sep 2022 10:55:42 +0200 Subject: [PATCH 382/539] Separate Push CI images from Scheduled CI (#19170) * separate images * Fix condition Co-authored-by: ydshieh --- .github/workflows/build-docker-images.yml | 18 ++++++++++++++++-- .github/workflows/self-push-caller.yml | 2 ++ .github/workflows/self-push.yml | 12 ++++++------ 3 files changed, 24 insertions(+), 8 deletions(-) diff --git a/.github/workflows/build-docker-images.yml b/.github/workflows/build-docker-images.yml index 2d4dfc9f044856..88934cbcbc5bb4 100644 --- a/.github/workflows/build-docker-images.yml +++ b/.github/workflows/build-docker-images.yml @@ -6,6 +6,10 @@ on: - docker-image* repository_dispatch: workflow_call: + inputs: + image_postfix: + required: true + type: string schedule: - cron: "0 1 * * *" @@ -38,10 +42,12 @@ jobs: build-args: | REF=main push: true - tags: huggingface/transformers-all-latest-gpu + tags: huggingface/transformers-all-latest-gpu${{ inputs.image_postfix }} latest-with-torch-nightly-docker: name: "Nightly PyTorch + Stable TensorFlow" + # Push CI doesn't need this image + if: inputs.image_postfix != '-push-ci' runs-on: ubuntu-latest steps: - @@ -91,10 +97,12 @@ jobs: build-args: | REF=main push: true - tags: huggingface/transformers-pytorch-deepspeed-latest-gpu + tags: huggingface/transformers-pytorch-deepspeed-latest-gpu${{ inputs.image_postfix }} nightly-torch-deepspeed-docker: name: "Nightly PyTorch + DeepSpeed" + # Push CI doesn't need this image + if: inputs.image_postfix != '-push-ci' runs-on: ubuntu-latest steps: - @@ -121,6 +129,8 @@ jobs: doc-builder: name: "Doc builder" + # Push CI doesn't need this image + if: inputs.image_postfix != '-push-ci' runs-on: ubuntu-latest steps: - @@ -145,6 +155,8 @@ jobs: latest-pytorch: name: "Latest PyTorch [dev]" + # Push CI doesn't need this image + if: inputs.image_postfix != '-push-ci' runs-on: ubuntu-latest steps: - @@ -171,6 +183,8 @@ jobs: latest-tensorflow: name: "Latest TensorFlow [dev]" + # Push CI doesn't need this image + if: inputs.image_postfix != '-push-ci' runs-on: ubuntu-latest steps: - diff --git a/.github/workflows/self-push-caller.yml b/.github/workflows/self-push-caller.yml index 6dffef5da7fb9a..4c90b138701435 100644 --- a/.github/workflows/self-push-caller.yml +++ b/.github/workflows/self-push-caller.yml @@ -40,6 +40,8 @@ jobs: needs: check-for-setup if: (github.event_name == 'push') && (needs.check-for-setup.outputs.changed == '1') uses: ./.github/workflows/build-docker-images.yml + with: + image_postfix: "-push-ci" secrets: inherit run_push_ci: diff --git a/.github/workflows/self-push.yml b/.github/workflows/self-push.yml index 97a76554df3ef0..af00314fa93fce 100644 --- a/.github/workflows/self-push.yml +++ b/.github/workflows/self-push.yml @@ -47,7 +47,7 @@ jobs: machine_type: [single-gpu, multi-gpu] runs-on: [self-hosted, docker-gpu, '${{ matrix.machine_type }}'] container: - image: huggingface/transformers-all-latest-gpu + image: huggingface/transformers-all-latest-gpu-push-ci options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ steps: - name: NVIDIA-SMI @@ -62,7 +62,7 @@ jobs: machine_type: [single-gpu, multi-gpu] runs-on: [self-hosted, docker-gpu, '${{ matrix.machine_type }}'] container: - image: huggingface/transformers-all-latest-gpu + image: huggingface/transformers-all-latest-gpu-push-ci options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ outputs: matrix: ${{ steps.set-matrix.outputs.matrix }} @@ -158,7 +158,7 @@ jobs: machine_type: [single-gpu] runs-on: [self-hosted, docker-gpu, '${{ matrix.machine_type }}'] container: - image: huggingface/transformers-all-latest-gpu + image: huggingface/transformers-all-latest-gpu-push-ci options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ steps: # Necessary to get the correct branch name and commit SHA for `workflow_run` event @@ -243,7 +243,7 @@ jobs: machine_type: [multi-gpu] runs-on: [self-hosted, docker-gpu, '${{ matrix.machine_type }}'] container: - image: huggingface/transformers-all-latest-gpu + image: huggingface/transformers-all-latest-gpu-push-ci options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ steps: # Necessary to get the correct branch name and commit SHA for `workflow_run` event @@ -328,7 +328,7 @@ jobs: machine_type: [single-gpu] runs-on: [self-hosted, docker-gpu, '${{ matrix.machine_type }}'] container: - image: huggingface/transformers-pytorch-deepspeed-latest-gpu + image: huggingface/transformers-pytorch-deepspeed-latest-gpu-push-ci options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ steps: # Necessary to get the correct branch name and commit SHA for `workflow_run` event @@ -410,7 +410,7 @@ jobs: machine_type: [multi-gpu] runs-on: [self-hosted, docker-gpu, '${{ matrix.machine_type }}'] container: - image: huggingface/transformers-pytorch-deepspeed-latest-gpu + image: huggingface/transformers-pytorch-deepspeed-latest-gpu-push-ci options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ steps: # Necessary to get the correct branch name and commit SHA for `workflow_run` event From 408b5e307b495b2c14e9c83ff1c62b944ff366af Mon Sep 17 00:00:00 2001 From: Ahmad Elawady Date: Mon, 26 Sep 2022 14:50:58 +0200 Subject: [PATCH 383/539] Remove pos arg from Perceiver's Pre/Postprocessors (#18602) * Remove pos arg from Perceiver's Pre/Postprocessors * Revert the removed pos args in public methods --- src/transformers/models/perceiver/modeling_perceiver.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/transformers/models/perceiver/modeling_perceiver.py b/src/transformers/models/perceiver/modeling_perceiver.py index 5f7856d20200bd..04cf80b9f1f2ec 100755 --- a/src/transformers/models/perceiver/modeling_perceiver.py +++ b/src/transformers/models/perceiver/modeling_perceiver.py @@ -3130,7 +3130,7 @@ def num_channels(self) -> int: return inp_dim + pos_dim - def _build_network_inputs(self, inputs: torch.Tensor, pos: torch.Tensor, network_input_is_1d: bool = True): + def _build_network_inputs(self, inputs: torch.Tensor, network_input_is_1d: bool = True): """ Construct the final input, including position encoding. @@ -3209,7 +3209,7 @@ def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, netw else: raise ValueError("Unsupported data format for conv1x1.") - inputs, inputs_without_pos = self._build_network_inputs(inputs, pos, network_input_is_1d) + inputs, inputs_without_pos = self._build_network_inputs(inputs, network_input_is_1d) modality_sizes = None # Size for each modality, only needed for multimodal return inputs, modality_sizes, inputs_without_pos @@ -3308,7 +3308,7 @@ def num_channels(self) -> int: return pos_dim return self.samples_per_patch + pos_dim - def _build_network_inputs(self, inputs, pos): + def _build_network_inputs(self, inputs): """Construct the final input, including position encoding.""" batch_size = inputs.shape[0] index_dims = inputs.shape[1:-1] @@ -3332,7 +3332,7 @@ def _build_network_inputs(self, inputs, pos): def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, network_input_is_1d: bool = True): inputs = torch.reshape(inputs, [inputs.shape[0], -1, self.samples_per_patch]) - inputs, inputs_without_pos = self._build_network_inputs(inputs, pos) + inputs, inputs_without_pos = self._build_network_inputs(inputs) modality_sizes = None # Size for each modality, only needed for multimodal return inputs, modality_sizes, inputs_without_pos From 98af4f9b54fd13fb5158c75a8f818122236c2e83 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Sep 2022 14:55:16 +0200 Subject: [PATCH 384/539] Bump protobuf in /examples/research_projects/decision_transformer (#19176) Bumps [protobuf](https://github.com/protocolbuffers/protobuf) from 3.19.4 to 3.19.5. - [Release notes](https://github.com/protocolbuffers/protobuf/releases) - [Changelog](https://github.com/protocolbuffers/protobuf/blob/main/generate_changelog.py) - [Commits](https://github.com/protocolbuffers/protobuf/compare/v3.19.4...v3.19.5) --- updated-dependencies: - dependency-name: protobuf dependency-type: direct:production ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .../research_projects/decision_transformer/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/research_projects/decision_transformer/requirements.txt b/examples/research_projects/decision_transformer/requirements.txt index 1b57c5a660992c..ba3e6a2d34fe48 100644 --- a/examples/research_projects/decision_transformer/requirements.txt +++ b/examples/research_projects/decision_transformer/requirements.txt @@ -144,7 +144,7 @@ portalocker==2.0.0 poyo==0.5.0 prettytable==3.2.0 prompt-toolkit==3.0.28 -protobuf==3.19.4 +protobuf==3.19.5 psutil==5.9.0 ptyprocess==0.7.0 pure-eval==0.2.2 From ea75e9f10e4cdac730dada875941870929016f24 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 26 Sep 2022 14:56:41 +0200 Subject: [PATCH 385/539] Use `assertAlmostEqual` in `BloomEmbeddingTest.test_logits` (#19200) Co-authored-by: ydshieh --- tests/models/bloom/test_modeling_bloom.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/models/bloom/test_modeling_bloom.py b/tests/models/bloom/test_modeling_bloom.py index aa7894d79e26c1..e9ae51a9f554d8 100644 --- a/tests/models/bloom/test_modeling_bloom.py +++ b/tests/models/bloom/test_modeling_bloom.py @@ -771,8 +771,8 @@ def test_logits(self): output_gpu_1, output_gpu_2 = output.split(125440, dim=-1) if cuda_available: - self.assertEqual(output_gpu_1.mean().item(), MEAN_LOGITS_GPU_1) - self.assertEqual(output_gpu_2.mean().item(), MEAN_LOGITS_GPU_2) + self.assertAlmostEqual(output_gpu_1.mean().item(), MEAN_LOGITS_GPU_1, places=6) + self.assertAlmostEqual(output_gpu_2.mean().item(), MEAN_LOGITS_GPU_2, places=6) else: self.assertAlmostEqual(output_gpu_1.mean().item(), MEAN_LOGITS_GPU_1, places=6) # 1e-06 precision!! self.assertAlmostEqual(output_gpu_2.mean().item(), MEAN_LOGITS_GPU_2, places=6) From 216b2f9e8061599982b635f3c6b0240f79d21e95 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Mon, 26 Sep 2022 06:43:34 -0700 Subject: [PATCH 386/539] Move the model type check (#19027) Co-authored-by: Ankur Goyal --- .../pipelines/document_question_answering.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/transformers/pipelines/document_question_answering.py b/src/transformers/pipelines/document_question_answering.py index a6afc069fb9fc7..2023ce9e0173ff 100644 --- a/src/transformers/pipelines/document_question_answering.py +++ b/src/transformers/pipelines/document_question_answering.py @@ -116,16 +116,17 @@ class DocumentQuestionAnsweringPipeline(Pipeline): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.check_model_type(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING) if self.model.config.__class__.__name__ == "VisionEncoderDecoderConfig": self.model_type = ModelType.VisionEncoderDecoder if self.model.config.encoder.model_type != "donut-swin": raise ValueError("Currently, the only supported VisionEncoderDecoder model is Donut") - elif self.model.config.__class__.__name__ == "LayoutLMConfig": - self.model_type = ModelType.LayoutLM else: - self.model_type = ModelType.LayoutLMv2andv3 + self.check_model_type(MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING) + if self.model.config.__class__.__name__ == "LayoutLMConfig": + self.model_type = ModelType.LayoutLM + else: + self.model_type = ModelType.LayoutLMv2andv3 def _sanitize_parameters( self, From c20b2c7e18424e35ce7217da1395928244ead78b Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Mon, 26 Sep 2022 09:50:48 -0400 Subject: [PATCH 387/539] Use repo_type instead of deprecated datasets repo IDs (#19202) * Use repo_type instead of deprecated datasets repo IDs * Add missing one in doc --- docs/source/en/tasks/semantic_segmentation.mdx | 4 ++-- .../semantic-segmentation/run_semantic_segmentation.py | 6 +++--- .../run_semantic_segmentation_no_trainer.py | 6 +++--- .../models/beit/convert_beit_unilm_to_pytorch.py | 8 ++++---- ...itional_detr_original_pytorch_checkpoint_to_pytorch.py | 4 ++-- .../models/convnext/convert_convnext_to_pytorch.py | 4 ++-- .../convert_cvt_original_pytorch_checkpoint_to_pytorch.py | 4 ++-- ...a2vec_vision_original_pytorch_checkpoint_to_pytorch.py | 4 ++-- .../deformable_detr/convert_deformable_detr_to_pytorch.py | 4 ++-- .../models/deit/convert_deit_timm_to_pytorch.py | 4 ++-- ...convert_detr_original_pytorch_checkpoint_to_pytorch.py | 4 ++-- .../models/dit/convert_dit_unilm_to_pytorch.py | 4 ++-- src/transformers/models/dpt/convert_dpt_to_pytorch.py | 4 ++-- .../models/levit/convert_levit_timm_to_pytorch.py | 4 ++-- .../models/mobilevit/convert_mlcvnets_to_pytorch.py | 4 ++-- .../perceiver/convert_perceiver_haiku_to_pytorch.py | 6 +++--- .../poolformer/convert_poolformer_original_to_pytorch.py | 4 ++-- .../models/regnet/convert_regnet_seer_10b_to_pytorch.py | 4 ++-- .../models/regnet/convert_regnet_to_pytorch.py | 4 ++-- .../models/resnet/convert_resnet_to_pytorch.py | 4 ++-- .../segformer/convert_segformer_original_to_pytorch.py | 4 ++-- .../models/swin/convert_swin_timm_to_pytorch.py | 4 ++-- .../models/swinv2/convert_swinv2_timm_to_pytorch.py | 8 ++++---- src/transformers/models/van/convert_van_to_pytorch.py | 4 ++-- .../models/videomae/convert_videomae_to_pytorch.py | 8 +++++--- .../models/vilt/convert_vilt_original_to_pytorch.py | 4 ++-- src/transformers/models/vit/convert_dino_to_pytorch.py | 4 ++-- .../models/vit/convert_vit_timm_to_pytorch.py | 4 ++-- .../x_clip/convert_x_clip_original_pytorch_to_hf.py | 3 ++- src/transformers/models/yolos/convert_yolos_to_pytorch.py | 4 ++-- tests/models/videomae/test_modeling_videomae.py | 4 +++- tests/models/x_clip/test_modeling_x_clip.py | 2 +- 32 files changed, 74 insertions(+), 69 deletions(-) diff --git a/docs/source/en/tasks/semantic_segmentation.mdx b/docs/source/en/tasks/semantic_segmentation.mdx index c288449552d100..3d1b5ef4537e7f 100644 --- a/docs/source/en/tasks/semantic_segmentation.mdx +++ b/docs/source/en/tasks/semantic_segmentation.mdx @@ -67,9 +67,9 @@ You'll also want to create a dictionary that maps a label id to a label class wh >>> import json >>> from huggingface_hub import cached_download, hf_hub_url ->>> repo_id = "datasets/huggingface/label-files" +>>> repo_id = "huggingface/label-files" >>> filename = "ade20k-id2label.json" ->>> id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename)), "r")) +>>> id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r")) >>> id2label = {int(k): v for k, v in id2label.items()} >>> label2id = {v: k for k, v in id2label.items()} >>> num_labels = len(id2label) diff --git a/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py b/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py index c42dc2a41ca9aa..bf8099135702bb 100644 --- a/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py +++ b/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py @@ -327,12 +327,12 @@ def main(): # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. if data_args.dataset_name == "scene_parse_150": - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" filename = "ade20k-id2label.json" else: - repo_id = f"datasets/{data_args.dataset_name}" + repo_id = data_args.dataset_name filename = "id2label.json" - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: str(k) for k, v in id2label.items()} diff --git a/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py b/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py index cfc32a93c4c3c0..8eb18434879acb 100644 --- a/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py +++ b/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py @@ -387,12 +387,12 @@ def main(): # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. if args.dataset_name == "scene_parse_150": - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" filename = "ade20k-id2label.json" else: - repo_id = f"datasets/{args.dataset_name}" + repo_id = args.dataset_name filename = "id2label.json" - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: k for k, v in id2label.items()} diff --git a/src/transformers/models/beit/convert_beit_unilm_to_pytorch.py b/src/transformers/models/beit/convert_beit_unilm_to_pytorch.py index 90b174d5d4b104..b9287c05bdef77 100644 --- a/src/transformers/models/beit/convert_beit_unilm_to_pytorch.py +++ b/src/transformers/models/beit/convert_beit_unilm_to_pytorch.py @@ -176,7 +176,7 @@ def convert_beit_checkpoint(checkpoint_url, pytorch_dump_folder_path): config = BeitConfig() has_lm_head = False is_semantic = False - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" # set config parameters based on URL if checkpoint_url[-9:-4] == "pt22k": # masked image modeling @@ -188,7 +188,7 @@ def convert_beit_checkpoint(checkpoint_url, pytorch_dump_folder_path): config.use_relative_position_bias = True config.num_labels = 21841 filename = "imagenet-22k-id2label.json" - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} # this dataset contains 21843 labels but the model only has 21841 # we delete the classes as mentioned in https://github.com/google-research/big_transfer/issues/18 @@ -201,7 +201,7 @@ def convert_beit_checkpoint(checkpoint_url, pytorch_dump_folder_path): config.use_relative_position_bias = True config.num_labels = 1000 filename = "imagenet-1k-id2label.json" - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} @@ -214,7 +214,7 @@ def convert_beit_checkpoint(checkpoint_url, pytorch_dump_folder_path): config.use_relative_position_bias = True config.num_labels = 150 filename = "ade20k-id2label.json" - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} diff --git a/src/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py index 904530c44c2272..a4e28cbb558a30 100644 --- a/src/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py +++ b/src/transformers/models/conditional_detr/convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py @@ -237,9 +237,9 @@ def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path): config.num_labels = 250 else: config.num_labels = 91 - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" filename = "coco-detection-id2label.json" - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} diff --git a/src/transformers/models/convnext/convert_convnext_to_pytorch.py b/src/transformers/models/convnext/convert_convnext_to_pytorch.py index 4d18bfc9b47f85..e40565c7a691cd 100644 --- a/src/transformers/models/convnext/convert_convnext_to_pytorch.py +++ b/src/transformers/models/convnext/convert_convnext_to_pytorch.py @@ -62,9 +62,9 @@ def get_convnext_config(checkpoint_url): filename = "imagenet-22k-id2label.json" expected_shape = (1, 21841) - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" config.num_labels = num_labels - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} if "1k" not in checkpoint_url: # this dataset contains 21843 labels but the model only has 21841 diff --git a/src/transformers/models/cvt/convert_cvt_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/models/cvt/convert_cvt_original_pytorch_checkpoint_to_pytorch.py index a33487c9e62a3c..72a8be4bef83c8 100644 --- a/src/transformers/models/cvt/convert_cvt_original_pytorch_checkpoint_to_pytorch.py +++ b/src/transformers/models/cvt/convert_cvt_original_pytorch_checkpoint_to_pytorch.py @@ -282,9 +282,9 @@ def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_fo img_labels_file = "imagenet-1k-id2label.json" num_labels = 1000 - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" num_labels = num_labels - id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file)), "r")) + id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r")) id2label = {int(k): v for k, v in id2label.items()} id2label = id2label diff --git a/src/transformers/models/data2vec/convert_data2vec_vision_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/models/data2vec/convert_data2vec_vision_original_pytorch_checkpoint_to_pytorch.py index b375167c8de83c..7777e85927cdaa 100755 --- a/src/transformers/models/data2vec/convert_data2vec_vision_original_pytorch_checkpoint_to_pytorch.py +++ b/src/transformers/models/data2vec/convert_data2vec_vision_original_pytorch_checkpoint_to_pytorch.py @@ -282,9 +282,9 @@ def main(): config.use_mean_pooling = True config.num_labels = 1000 - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" filename = "imagenet-1k-id2label.json" - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} diff --git a/src/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py b/src/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py index 30726c5e9744c6..8e4461d515c2ea 100644 --- a/src/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py +++ b/src/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py @@ -108,9 +108,9 @@ def convert_deformable_detr_checkpoint( config.two_stage = two_stage # set labels config.num_labels = 91 - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" filename = "coco-detection-id2label.json" - id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename)), "r")) + id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} diff --git a/src/transformers/models/deit/convert_deit_timm_to_pytorch.py b/src/transformers/models/deit/convert_deit_timm_to_pytorch.py index a9225c819b48d8..8a8a394c3f8103 100644 --- a/src/transformers/models/deit/convert_deit_timm_to_pytorch.py +++ b/src/transformers/models/deit/convert_deit_timm_to_pytorch.py @@ -140,9 +140,9 @@ def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path): base_model = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size config.num_labels = 1000 - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" filename = "imagenet-1k-id2label.json" - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} diff --git a/src/transformers/models/detr/convert_detr_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/models/detr/convert_detr_original_pytorch_checkpoint_to_pytorch.py index feb9d98eb7cf4b..abb7ed72a86272 100644 --- a/src/transformers/models/detr/convert_detr_original_pytorch_checkpoint_to_pytorch.py +++ b/src/transformers/models/detr/convert_detr_original_pytorch_checkpoint_to_pytorch.py @@ -194,9 +194,9 @@ def convert_detr_checkpoint(model_name, pytorch_dump_folder_path): config.num_labels = 250 else: config.num_labels = 91 - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" filename = "coco-detection-id2label.json" - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} diff --git a/src/transformers/models/dit/convert_dit_unilm_to_pytorch.py b/src/transformers/models/dit/convert_dit_unilm_to_pytorch.py index e005946db602a4..07c1a3094ce6d4 100644 --- a/src/transformers/models/dit/convert_dit_unilm_to_pytorch.py +++ b/src/transformers/models/dit/convert_dit_unilm_to_pytorch.py @@ -149,9 +149,9 @@ def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub # labels if "rvlcdip" in checkpoint_url: config.num_labels = 16 - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" filename = "rvlcdip-id2label.json" - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} diff --git a/src/transformers/models/dpt/convert_dpt_to_pytorch.py b/src/transformers/models/dpt/convert_dpt_to_pytorch.py index 0050f5e0a83687..dc26d017d73644 100644 --- a/src/transformers/models/dpt/convert_dpt_to_pytorch.py +++ b/src/transformers/models/dpt/convert_dpt_to_pytorch.py @@ -48,9 +48,9 @@ def get_dpt_config(checkpoint_url): config.use_batch_norm_in_fusion_residual = True config.num_labels = 150 - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" filename = "ade20k-id2label.json" - id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename)), "r")) + id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} diff --git a/src/transformers/models/levit/convert_levit_timm_to_pytorch.py b/src/transformers/models/levit/convert_levit_timm_to_pytorch.py index d9449aad7ab1d9..a3b59ee8766f5f 100644 --- a/src/transformers/models/levit/convert_levit_timm_to_pytorch.py +++ b/src/transformers/models/levit/convert_levit_timm_to_pytorch.py @@ -85,9 +85,9 @@ def convert_weights_and_push(save_directory: Path, model_name: str = None, push_ num_labels = 1000 expected_shape = (1, num_labels) - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" num_labels = num_labels - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} id2label = id2label diff --git a/src/transformers/models/mobilevit/convert_mlcvnets_to_pytorch.py b/src/transformers/models/mobilevit/convert_mlcvnets_to_pytorch.py index 7f3e07f7b54018..bc61f8822efa0f 100644 --- a/src/transformers/models/mobilevit/convert_mlcvnets_to_pytorch.py +++ b/src/transformers/models/mobilevit/convert_mlcvnets_to_pytorch.py @@ -62,8 +62,8 @@ def get_mobilevit_config(mobilevit_name): config.num_labels = 1000 filename = "imagenet-1k-id2label.json" - repo_id = "datasets/huggingface/label-files" - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + repo_id = "huggingface/label-files" + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} diff --git a/src/transformers/models/perceiver/convert_perceiver_haiku_to_pytorch.py b/src/transformers/models/perceiver/convert_perceiver_haiku_to_pytorch.py index d1af1f36677a9b..d1a4fd14e57602 100644 --- a/src/transformers/models/perceiver/convert_perceiver_haiku_to_pytorch.py +++ b/src/transformers/models/perceiver/convert_perceiver_haiku_to_pytorch.py @@ -300,7 +300,7 @@ def convert_perceiver_checkpoint(pickle_file, pytorch_dump_folder_path, architec # load HuggingFace model config = PerceiverConfig() subsampling = None - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" if architecture == "MLM": config.qk_channels = 8 * 32 config.v_channels = 1280 @@ -318,7 +318,7 @@ def convert_perceiver_checkpoint(pickle_file, pytorch_dump_folder_path, architec # set labels config.num_labels = 1000 filename = "imagenet-1k-id2label.json" - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} @@ -367,7 +367,7 @@ def convert_perceiver_checkpoint(pickle_file, pytorch_dump_folder_path, architec model = PerceiverForMultimodalAutoencoding(config) # set labels filename = "kinetics700-id2label.json" - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} diff --git a/src/transformers/models/poolformer/convert_poolformer_original_to_pytorch.py b/src/transformers/models/poolformer/convert_poolformer_original_to_pytorch.py index 6bb6ec2510fd3b..4ab0d2bfb3d457 100644 --- a/src/transformers/models/poolformer/convert_poolformer_original_to_pytorch.py +++ b/src/transformers/models/poolformer/convert_poolformer_original_to_pytorch.py @@ -99,14 +99,14 @@ def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_fold config = PoolFormerConfig() # set attributes based on model_name - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" size = model_name[-3:] config.num_labels = 1000 filename = "imagenet-1k-id2label.json" expected_shape = (1, 1000) # set config attributes - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} diff --git a/src/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py b/src/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py index a43967d0095d2b..4a73b9623f113c 100644 --- a/src/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py +++ b/src/transformers/models/regnet/convert_regnet_seer_10b_to_pytorch.py @@ -163,9 +163,9 @@ def convert_weights_and_push(save_directory: Path, model_name: str = None, push_ filename = "imagenet-1k-id2label.json" num_labels = 1000 - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" num_labels = num_labels - id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename)), "r")) + id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r")) id2label = {int(k): v for k, v in id2label.items()} id2label = id2label diff --git a/src/transformers/models/regnet/convert_regnet_to_pytorch.py b/src/transformers/models/regnet/convert_regnet_to_pytorch.py index 9bb0ba0f053283..acb74dc89dce93 100644 --- a/src/transformers/models/regnet/convert_regnet_to_pytorch.py +++ b/src/transformers/models/regnet/convert_regnet_to_pytorch.py @@ -224,9 +224,9 @@ def convert_weights_and_push(save_directory: Path, model_name: str = None, push_ num_labels = 1000 expected_shape = (1, num_labels) - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" num_labels = num_labels - id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename)), "r")) + id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r")) id2label = {int(k): v for k, v in id2label.items()} id2label = id2label diff --git a/src/transformers/models/resnet/convert_resnet_to_pytorch.py b/src/transformers/models/resnet/convert_resnet_to_pytorch.py index 55a865ed593620..ef3d564185df8c 100644 --- a/src/transformers/models/resnet/convert_resnet_to_pytorch.py +++ b/src/transformers/models/resnet/convert_resnet_to_pytorch.py @@ -128,9 +128,9 @@ def convert_weights_and_push(save_directory: Path, model_name: str = None, push_ num_labels = 1000 expected_shape = (1, num_labels) - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" num_labels = num_labels - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} id2label = id2label diff --git a/src/transformers/models/segformer/convert_segformer_original_to_pytorch.py b/src/transformers/models/segformer/convert_segformer_original_to_pytorch.py index da0ca7b3cc27a4..00dddc9974a953 100644 --- a/src/transformers/models/segformer/convert_segformer_original_to_pytorch.py +++ b/src/transformers/models/segformer/convert_segformer_original_to_pytorch.py @@ -128,7 +128,7 @@ def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folde encoder_only = False # set attributes based on model_name - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" if "segformer" in model_name: size = model_name[len("segformer.") : len("segformer.") + 2] if "ade" in model_name: @@ -151,7 +151,7 @@ def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folde raise ValueError(f"Model {model_name} not supported") # set config attributes - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} diff --git a/src/transformers/models/swin/convert_swin_timm_to_pytorch.py b/src/transformers/models/swin/convert_swin_timm_to_pytorch.py index 0d09d27fa2322b..860fdd1b54d2af 100644 --- a/src/transformers/models/swin/convert_swin_timm_to_pytorch.py +++ b/src/transformers/models/swin/convert_swin_timm_to_pytorch.py @@ -39,9 +39,9 @@ def get_swin_config(swin_name): num_classes = 21841 else: num_classes = 1000 - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" filename = "imagenet-1k-id2label.json" - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} diff --git a/src/transformers/models/swinv2/convert_swinv2_timm_to_pytorch.py b/src/transformers/models/swinv2/convert_swinv2_timm_to_pytorch.py index 148793e3043b84..7af3bfb86c1741 100644 --- a/src/transformers/models/swinv2/convert_swinv2_timm_to_pytorch.py +++ b/src/transformers/models/swinv2/convert_swinv2_timm_to_pytorch.py @@ -63,18 +63,18 @@ def get_swinv2_config(swinv2_name): if ("22k" in swinv2_name) and ("to" not in swinv2_name): num_classes = 21841 - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" filename = "imagenet-22k-id2label.json" - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} else: num_classes = 1000 - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" filename = "imagenet-1k-id2label.json" - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} diff --git a/src/transformers/models/van/convert_van_to_pytorch.py b/src/transformers/models/van/convert_van_to_pytorch.py index e2c0c95e64502b..ded3c3500dad24 100644 --- a/src/transformers/models/van/convert_van_to_pytorch.py +++ b/src/transformers/models/van/convert_van_to_pytorch.py @@ -168,9 +168,9 @@ def convert_weights_and_push(save_directory: Path, model_name: str = None, push_ filename = "imagenet-1k-id2label.json" num_labels = 1000 - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" num_labels = num_labels - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} id2label = id2label diff --git a/src/transformers/models/videomae/convert_videomae_to_pytorch.py b/src/transformers/models/videomae/convert_videomae_to_pytorch.py index 60e5ae8f5f41c0..2f4ce5d44704a7 100644 --- a/src/transformers/models/videomae/convert_videomae_to_pytorch.py +++ b/src/transformers/models/videomae/convert_videomae_to_pytorch.py @@ -47,7 +47,7 @@ def get_videomae_config(model_name): config.use_mean_pooling = False if "finetuned" in model_name: - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" if "kinetics" in model_name: config.num_labels = 400 filename = "kinetics400-id2label.json" @@ -56,7 +56,7 @@ def get_videomae_config(model_name): filename = "something-something-v2-id2label.json" else: raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.") - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} @@ -145,7 +145,9 @@ def convert_state_dict(orig_state_dict, config): # We will verify our results on a video of eating spaghetti # Frame indices used: [164 168 172 176 181 185 189 193 198 202 206 210 215 219 223 227] def prepare_video(): - file = hf_hub_download(repo_id="datasets/hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy") + file = hf_hub_download( + repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset" + ) video = np.load(file) return list(video) diff --git a/src/transformers/models/vilt/convert_vilt_original_to_pytorch.py b/src/transformers/models/vilt/convert_vilt_original_to_pytorch.py index 3a186e1d2d918a..5e737f784c81b2 100644 --- a/src/transformers/models/vilt/convert_vilt_original_to_pytorch.py +++ b/src/transformers/models/vilt/convert_vilt_original_to_pytorch.py @@ -180,9 +180,9 @@ def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path): if "vqa" in checkpoint_url: vqa_model = True config.num_labels = 3129 - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" filename = "vqa2-id2label.json" - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} diff --git a/src/transformers/models/vit/convert_dino_to_pytorch.py b/src/transformers/models/vit/convert_dino_to_pytorch.py index 8922684594a59e..1a8ba21a658b8b 100644 --- a/src/transformers/models/vit/convert_dino_to_pytorch.py +++ b/src/transformers/models/vit/convert_dino_to_pytorch.py @@ -142,9 +142,9 @@ def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True # set labels if required if not base_model: config.num_labels = 1000 - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" filename = "imagenet-1k-id2label.json" - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} diff --git a/src/transformers/models/vit/convert_vit_timm_to_pytorch.py b/src/transformers/models/vit/convert_vit_timm_to_pytorch.py index 30495bd0f1e83a..bc1f7f72dd5f3c 100644 --- a/src/transformers/models/vit/convert_vit_timm_to_pytorch.py +++ b/src/transformers/models/vit/convert_vit_timm_to_pytorch.py @@ -147,9 +147,9 @@ def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path): config.image_size = int(vit_name[-9:-6]) else: config.num_labels = 1000 - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" filename = "imagenet-1k-id2label.json" - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} diff --git a/src/transformers/models/x_clip/convert_x_clip_original_pytorch_to_hf.py b/src/transformers/models/x_clip/convert_x_clip_original_pytorch_to_hf.py index 2f5364f440986f..8210b3f709e39a 100644 --- a/src/transformers/models/x_clip/convert_x_clip_original_pytorch_to_hf.py +++ b/src/transformers/models/x_clip/convert_x_clip_original_pytorch_to_hf.py @@ -207,8 +207,9 @@ def prepare_video(num_frames): elif num_frames == 32: filename = "eating_spaghetti_32_frames.npy" file = hf_hub_download( - repo_id="datasets/hf-internal-testing/spaghetti-video", + repo_id="hf-internal-testing/spaghetti-video", filename=filename, + repo_type="dataset", ) video = np.load(file) return list(video) diff --git a/src/transformers/models/yolos/convert_yolos_to_pytorch.py b/src/transformers/models/yolos/convert_yolos_to_pytorch.py index 7f4161a632d89f..be840151a1bfa0 100644 --- a/src/transformers/models/yolos/convert_yolos_to_pytorch.py +++ b/src/transformers/models/yolos/convert_yolos_to_pytorch.py @@ -57,9 +57,9 @@ def get_yolos_config(yolos_name): config.image_size = [800, 1344] config.num_labels = 91 - repo_id = "datasets/huggingface/label-files" + repo_id = "huggingface/label-files" filename = "coco-detection-id2label.json" - id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} diff --git a/tests/models/videomae/test_modeling_videomae.py b/tests/models/videomae/test_modeling_videomae.py index adce62021c9ded..bc665410b6c6b9 100644 --- a/tests/models/videomae/test_modeling_videomae.py +++ b/tests/models/videomae/test_modeling_videomae.py @@ -342,7 +342,9 @@ def check_hidden_states_output(inputs_dict, config, model_class): # We will verify our results on a video of eating spaghetti # Frame indices used: [164 168 172 176 181 185 189 193 198 202 206 210 215 219 223 227] def prepare_video(): - file = hf_hub_download(repo_id="datasets/hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy") + file = hf_hub_download( + repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset" + ) video = np.load(file) return list(video) diff --git a/tests/models/x_clip/test_modeling_x_clip.py b/tests/models/x_clip/test_modeling_x_clip.py index 62c8e9992b0d9c..0a70fdcb44c73c 100644 --- a/tests/models/x_clip/test_modeling_x_clip.py +++ b/tests/models/x_clip/test_modeling_x_clip.py @@ -633,7 +633,7 @@ def test_model_from_pretrained(self): # We will verify our results on a spaghetti video def prepare_video(): file = hf_hub_download( - repo_id="datasets/hf-internal-testing/spaghetti-video", filename="eating_spaghetti_8_frames.npy" + repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti_8_frames.npy", repo_type="dataset" ) video = np.load(file) return list(video) From be4f269979e157f1ba3326d14a837dc61096b2e5 Mon Sep 17 00:00:00 2001 From: IMvision12 <88665786+IMvision12@users.noreply.github.com> Date: Tue, 27 Sep 2022 02:32:57 +0530 Subject: [PATCH 388/539] Updated hf_argparser.py (#19188) * Changed json_file_parser function and added yaml parser function * update hf_argparser * Added allow_extra_keys argument --- src/transformers/hf_argparser.py | 64 +++++++++++++++++++------------- 1 file changed, 38 insertions(+), 26 deletions(-) diff --git a/src/transformers/hf_argparser.py b/src/transformers/hf_argparser.py index 140651b2e82ab6..b74db2ee4ea1d4 100644 --- a/src/transformers/hf_argparser.py +++ b/src/transformers/hf_argparser.py @@ -22,6 +22,8 @@ from pathlib import Path from typing import Any, Dict, Iterable, NewType, Optional, Tuple, Union, get_type_hints +import yaml + DataClass = NewType("DataClass", Any) DataClassType = NewType("DataClassType", Any) @@ -234,29 +236,27 @@ def parse_args_into_dataclasses( return (*outputs,) - def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]: + def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]: """ - Alternative helper method that does not use `argparse` at all, instead loading a json file and populating the - dataclass types. + Alternative helper method that does not use `argparse` at all, instead uses a dict and populating the dataclass + types. Args: - json_file (`str` or `os.PathLike`): - File name of the json file to parse + args (`dict`): + dict containing config values allow_extra_keys (`bool`, *optional*, defaults to `False`): - Defaults to False. If False, will raise an exception if the json file contains keys that are not - parsed. + Defaults to False. If False, will raise an exception if the dict contains keys that are not parsed. Returns: Tuple consisting of: - the dataclass instances in the same order as they were passed to the initializer. """ - data = json.loads(Path(json_file).read_text()) - unused_keys = set(data.keys()) + unused_keys = set(args.keys()) outputs = [] for dtype in self.dataclass_types: keys = {f.name for f in dataclasses.fields(dtype) if f.init} - inputs = {k: v for k, v in data.items() if k in keys} + inputs = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys()) obj = dtype(**inputs) outputs.append(obj) @@ -264,30 +264,42 @@ def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tup raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}") return tuple(outputs) - def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]: + def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]: """ - Alternative helper method that does not use `argparse` at all, instead uses a dict and populating the dataclass - types. + Alternative helper method that does not use `argparse` at all, instead loading a json file and populating the + dataclass types. Args: - args (`dict`): - dict containing config values + json_file (`str` or `os.PathLike`): + File name of the json file to parse allow_extra_keys (`bool`, *optional*, defaults to `False`): - Defaults to False. If False, will raise an exception if the dict contains keys that are not parsed. + Defaults to False. If False, will raise an exception if the json file contains keys that are not + parsed. Returns: Tuple consisting of: - the dataclass instances in the same order as they were passed to the initializer. """ - unused_keys = set(args.keys()) - outputs = [] - for dtype in self.dataclass_types: - keys = {f.name for f in dataclasses.fields(dtype) if f.init} - inputs = {k: v for k, v in args.items() if k in keys} - unused_keys.difference_update(inputs.keys()) - obj = dtype(**inputs) - outputs.append(obj) - if not allow_extra_keys and unused_keys: - raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}") + outputs = self.parse_dict(json.loads(Path(json_file).read_text()), allow_extra_keys=allow_extra_keys) + return tuple(outputs) + + def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]: + """ + Alternative helper method that does not use `argparse` at all, instead loading a json file and populating the + dataclass types. + + Args: + yaml_file (`str` or `os.PathLike`): + File name of the yaml file to parse + allow_extra_keys (`bool`, *optional*, defaults to `False`): + Defaults to False. If False, will raise an exception if the json file contains keys that are not + parsed. + + Returns: + Tuple consisting of: + + - the dataclass instances in the same order as they were passed to the initializer. + """ + outputs = self.parse_dict(yaml.safe_load(yaml_file), allow_extra_keys=allow_extra_keys) return tuple(outputs) From ca0886395b90aef52dadac8cf0002d0ba297fa76 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 26 Sep 2022 23:58:02 +0200 Subject: [PATCH 389/539] Add warning for torchaudio <= 0.10 in MCTCTFeatureExtractor (#19203) Co-authored-by: ydshieh --- src/transformers/models/mctct/feature_extraction_mctct.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/transformers/models/mctct/feature_extraction_mctct.py b/src/transformers/models/mctct/feature_extraction_mctct.py index 573551bcf7780d..9785d530e92626 100644 --- a/src/transformers/models/mctct/feature_extraction_mctct.py +++ b/src/transformers/models/mctct/feature_extraction_mctct.py @@ -21,6 +21,7 @@ import numpy as np import torch import torchaudio +from packaging import version from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature @@ -30,6 +31,13 @@ logger = logging.get_logger(__name__) +parsed_torchaudio_version_base = version.parse(version.parse(torchaudio.__version__).base_version) +if not parsed_torchaudio_version_base >= version.parse("0.10"): + logger.warning( + f"You are using torchaudio=={torchaudio.__version__}, but torchaudio>=0.10.0 is required to use " + "MCTCTFeatureExtractor. This requires torch>=1.10.0. Please upgrade torch and torchaudio." + ) + class MCTCTFeatureExtractor(SequenceFeatureExtractor): r""" From a32f97c37d53cfc4299e83678c4082e3ddb00bf9 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Mon, 26 Sep 2022 18:01:00 -0400 Subject: [PATCH 390/539] Fix cached_file in offline mode for cached non-existing files (#19206) * Fix cached_file in offline mode for cached non-existing files * Add tests * Test with offline mode --- src/transformers/utils/hub.py | 4 +- tests/utils/test_file_utils.py | 48 +------------- tests/utils/test_hub_utils.py | 110 +++++++++++++++++++++++++++++++++ utils/tests_fetcher.py | 2 +- 4 files changed, 114 insertions(+), 50 deletions(-) create mode 100644 tests/utils/test_hub_utils.py diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py index cd4c92e50b3774..8c149bec648900 100644 --- a/src/transformers/utils/hub.py +++ b/src/transformers/utils/hub.py @@ -435,7 +435,7 @@ def cached_file( except LocalEntryNotFoundError: # We try to see if we have a cached version (not up to date): resolved_file = try_to_load_from_cache(path_or_repo_id, full_filename, cache_dir=cache_dir, revision=revision) - if resolved_file is not None: + if resolved_file is not None and resolved_file != _CACHED_NO_EXIST: return resolved_file if not _raise_exceptions_for_missing_entries or not _raise_exceptions_for_connection_errors: return None @@ -457,7 +457,7 @@ def cached_file( except HTTPError as err: # First we try to see if we have a cached version (not up to date): resolved_file = try_to_load_from_cache(path_or_repo_id, full_filename, cache_dir=cache_dir, revision=revision) - if resolved_file is not None: + if resolved_file is not None and resolved_file != _CACHED_NO_EXIST: return resolved_file if not _raise_exceptions_for_connection_errors: return None diff --git a/tests/utils/test_file_utils.py b/tests/utils/test_file_utils.py index 60676e9f7d9d37..e7963bfa51a5b2 100644 --- a/tests/utils/test_file_utils.py +++ b/tests/utils/test_file_utils.py @@ -15,28 +15,14 @@ import contextlib import importlib import io -import json -import tempfile import unittest -from pathlib import Path import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER -from transformers.utils import ( - FLAX_WEIGHTS_NAME, - TF2_WEIGHTS_NAME, - WEIGHTS_NAME, - ContextManagers, - find_labels, - get_file_from_repo, - has_file, - is_flax_available, - is_tf_available, - is_torch_available, -) +from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER @@ -77,38 +63,6 @@ def test_module_spec_available(self): assert importlib.util.find_spec("transformers") is not None -class GetFromCacheTests(unittest.TestCase): - def test_has_file(self): - self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME)) - self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME)) - self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME)) - - def test_get_file_from_repo_distant(self): - # `get_file_from_repo` returns None if the file does not exist - self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt")) - - # The function raises if the repository does not exist. - with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"): - get_file_from_repo("bert-base-case", "config.json") - - # The function raises if the revision does not exist. - with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"): - get_file_from_repo("bert-base-cased", "config.json", revision="ahaha") - - resolved_file = get_file_from_repo("bert-base-cased", "config.json") - # The name is the cached name which is not very easy to test, so instead we load the content. - config = json.loads(open(resolved_file, "r").read()) - self.assertEqual(config["hidden_size"], 768) - - def test_get_file_from_repo_local(self): - with tempfile.TemporaryDirectory() as tmp_dir: - filename = Path(tmp_dir) / "a.txt" - filename.touch() - self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename)) - - self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt")) - - class GenericUtilTests(unittest.TestCase): @unittest.mock.patch("sys.stdout", new_callable=io.StringIO) def test_context_managers_no_context(self, mock_stdout): diff --git a/tests/utils/test_hub_utils.py b/tests/utils/test_hub_utils.py new file mode 100644 index 00000000000000..f55a0ae4315c4a --- /dev/null +++ b/tests/utils/test_hub_utils.py @@ -0,0 +1,110 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json +import os +import tempfile +import unittest +from pathlib import Path + +from transformers.utils import ( + CONFIG_NAME, + FLAX_WEIGHTS_NAME, + TF2_WEIGHTS_NAME, + TRANSFORMERS_CACHE, + WEIGHTS_NAME, + cached_file, + get_file_from_repo, + has_file, +) + + +RANDOM_BERT = "hf-internal-testing/tiny-random-bert" +CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert") +FULL_COMMIT_HASH = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6" + + +class GetFromCacheTests(unittest.TestCase): + def test_cached_file(self): + archive_file = cached_file(RANDOM_BERT, CONFIG_NAME) + # Should have downloaded the file in here + self.assertTrue(os.path.isdir(CACHE_DIR)) + # Cache should contain at least those three subfolders: + for subfolder in ["blobs", "refs", "snapshots"]: + self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder))) + with open(os.path.join(CACHE_DIR, "refs", "main")) as f: + main_commit = f.read() + self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME)) + self.assertTrue(os.path.isfile(archive_file)) + + # File is cached at the same place the second time. + new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME) + self.assertEqual(archive_file, new_archive_file) + + # Using a specific revision to test the full commit hash. + archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223") + self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME)) + + def test_cached_file_errors(self): + with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"): + _ = cached_file("tiny-random-bert", CONFIG_NAME) + + with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"): + _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa") + + with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"): + _ = cached_file(RANDOM_BERT, "conf") + + def test_non_existence_is_cached(self): + with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"): + _ = cached_file(RANDOM_BERT, "conf") + + with open(os.path.join(CACHE_DIR, "refs", "main")) as f: + main_commit = f.read() + self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf"))) + + path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False) + self.assertIsNone(path) + + path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False) + self.assertIsNone(path) + + def test_has_file(self): + self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME)) + self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME)) + self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME)) + + def test_get_file_from_repo_distant(self): + # `get_file_from_repo` returns None if the file does not exist + self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt")) + + # The function raises if the repository does not exist. + with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"): + get_file_from_repo("bert-base-case", CONFIG_NAME) + + # The function raises if the revision does not exist. + with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"): + get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha") + + resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME) + # The name is the cached name which is not very easy to test, so instead we load the content. + config = json.loads(open(resolved_file, "r").read()) + self.assertEqual(config["hidden_size"], 768) + + def test_get_file_from_repo_local(self): + with tempfile.TemporaryDirectory() as tmp_dir: + filename = Path(tmp_dir) / "a.txt" + filename.touch() + self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename)) + + self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt")) diff --git a/utils/tests_fetcher.py b/utils/tests_fetcher.py index 167bf75db1c25d..0af1a8ad8eb735 100644 --- a/utils/tests_fetcher.py +++ b/utils/tests_fetcher.py @@ -354,7 +354,7 @@ def create_reverse_dependency_map(): "feature_extraction_utils.py": "test_feature_extraction_common.py", "file_utils.py": ["utils/test_file_utils.py", "utils/test_model_output.py"], "utils/generic.py": ["utils/test_file_utils.py", "utils/test_model_output.py", "utils/test_generic.py"], - "utils/hub.py": "utils/test_file_utils.py", + "utils/hub.py": "utils/test_hub_utils.py", "modelcard.py": "utils/test_model_card.py", "modeling_flax_utils.py": "test_modeling_flax_common.py", "modeling_tf_utils.py": ["test_modeling_tf_common.py", "utils/test_modeling_tf_core.py"], From 7132d55ca1824d5bd804755ce10db5e182823549 Mon Sep 17 00:00:00 2001 From: Ekagra Ranjan Date: Tue, 27 Sep 2022 14:09:31 +0530 Subject: [PATCH 391/539] Remove unused `cur_len` in generation_utils.py (#18874) * remove unused cur_len in generation_utils.py * linting --- src/transformers/generation_utils.py | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/src/transformers/generation_utils.py b/src/transformers/generation_utils.py index 3a9b7b24480ec2..1509e92b2d3f12 100644 --- a/src/transformers/generation_utils.py +++ b/src/transformers/generation_utils.py @@ -534,7 +534,6 @@ def _prepare_decoder_input_ids_for_generation( model_kwargs: Optional[Dict[str, torch.Tensor]] = None, device: torch.device = None, ) -> torch.LongTensor: - if model_kwargs is not None and "decoder_input_ids" in model_kwargs: return model_kwargs.pop("decoder_input_ids") else: @@ -1720,11 +1719,9 @@ def greedy_search( # keep track of which sequences are already finished unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1) - cur_len = input_ids.shape[-1] this_peer_finished = False # used by synced_gpus only while True: - if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence @@ -1747,7 +1744,6 @@ def greedy_search( ) if synced_gpus and this_peer_finished: - cur_len = cur_len + 1 continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] @@ -1787,7 +1783,6 @@ def greedy_search( model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) - cur_len = cur_len + 1 # if eos_token was found in one sentence, set sentence to finished if eos_token_id is not None: @@ -1973,12 +1968,10 @@ def sample( # keep track of which sequences are already finished unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1) - cur_len = input_ids.shape[-1] this_peer_finished = False # used by synced_gpus only # auto-regressive generation while True: - if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence @@ -2001,7 +1994,6 @@ def sample( ) if synced_gpus and this_peer_finished: - cur_len = cur_len + 1 continue # don't waste resources running the code we don't need next_token_logits = outputs.logits[:, -1, :] @@ -2043,7 +2035,6 @@ def sample( model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) - cur_len = cur_len + 1 # if eos_token was found in one sentence, set sentence to finished if eos_token_id is not None: @@ -2248,7 +2239,6 @@ def beam_search( this_peer_finished = False # used by synced_gpus only while True: - if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence @@ -2561,7 +2551,6 @@ def beam_sample( this_peer_finished = False # used by synced_gpus only while True: - if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence @@ -2884,7 +2873,6 @@ def group_beam_search( this_peer_finished = False # used by synced_gpus only while True: - if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence @@ -3083,7 +3071,6 @@ def constrained_beam_search( synced_gpus: Optional[bool] = None, **model_kwargs, ) -> Union[BeamSearchOutput, torch.LongTensor]: - r""" Generates sequences of token ids for models with a language modeling head using **constrained beam search decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. @@ -3247,7 +3234,6 @@ def constrained_beam_search( this_peer_finished = False # used by synced_gpus only while True: - if synced_gpus: # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. # The following logic allows an early break if all peers finished generating their sequence From ea540a5977e602eb8072bfb3120c95f163c64a02 Mon Sep 17 00:00:00 2001 From: Arijit Mukherjee Date: Tue, 27 Sep 2022 16:42:56 +0530 Subject: [PATCH 392/539] add wav2vec2_alignment (#16782) * add wav2vec2_alignment * Update alignment.py * Update examples/research_projects/wav2vec2/alignment.py Co-authored-by: Patrick von Platen * Update examples/research_projects/wav2vec2/alignment.py Co-authored-by: Patrick von Platen * Update examples/research_projects/wav2vec2/alignment.py Co-authored-by: Patrick von Platen * Update examples/research_projects/wav2vec2/alignment.py Co-authored-by: Patrick von Platen * Update README.md * fix style * fix imports * fix multithread * fix bash script * [@anton-l] Style fixes and docstrings * [@anton-l] Style fixes and docstrings * Update alignment.py fix blank id in backtrack Co-authored-by: Patrick von Platen Co-authored-by: anton-l --- examples/research_projects/wav2vec2/README.md | 31 +++ .../research_projects/wav2vec2/alignment.py | 224 ++++++++++++++++++ .../wav2vec2/run_alignment.sh | 8 + 3 files changed, 263 insertions(+) create mode 100644 examples/research_projects/wav2vec2/alignment.py create mode 100644 examples/research_projects/wav2vec2/run_alignment.sh diff --git a/examples/research_projects/wav2vec2/README.md b/examples/research_projects/wav2vec2/README.md index 8f9da274f05db9..1dcd8dcc283538 100644 --- a/examples/research_projects/wav2vec2/README.md +++ b/examples/research_projects/wav2vec2/README.md @@ -216,3 +216,34 @@ PYTHONPATH=../../../src deepspeed --num_gpus 4 run_pretrain.py \ --fp16 \ --deepspeed ds_config_wav2vec2_zero2.json \ ``` + + +### Forced Alignment + +Character level forced alignment for audio and text pairs with wav2vec2 models finetuned on ASR task for a specific language. +Inspired by [this](https://pytorch.org/tutorials/intermediate/forced_alignment_with_torchaudio_tutorial.html) Pytorch tutorial. + +#### Input Formats + + Input format in script.txt Input format in wavs directroy + 0000 sentence1 0000.wav + 0001 sentence2 0001.wav + +#### Output Format + +Output directory will contain 0000.txt and 0001.txt. Each file will have format like below + + char score start_ms end_ms + h 0.25 1440 1520 + +#### Run command + +``` +python alignment.py \ +--model_name="arijitx/wav2vec2-xls-r-300m-bengali" \ +--wav_dir="./wavs" +--text_file="script.txt" \ +--input_wavs_sr=48000 \ +--output_dir="./out_alignment" \ +--cuda +``` diff --git a/examples/research_projects/wav2vec2/alignment.py b/examples/research_projects/wav2vec2/alignment.py new file mode 100644 index 00000000000000..24347a55a0bce7 --- /dev/null +++ b/examples/research_projects/wav2vec2/alignment.py @@ -0,0 +1,224 @@ +# Parts of the code are adapted from the snippets provided in the TorchAudio Wav2Vec forced alignment tutorial. +# The full tutorial can be found here: https://pytorch.org/audio/stable/tutorials/forced_alignment_tutorial.html + +import argparse +import os +from dataclasses import dataclass + +import torch +import torchaudio +from tqdm import tqdm + +from transformers import AutoConfig, AutoModelForCTC, AutoProcessor + + +class Wav2Vec2Aligner: + def __init__(self, model_name, input_wavs_sr, cuda): + self.cuda = cuda + self.config = AutoConfig.from_pretrained(model_name) + self.model = AutoModelForCTC.from_pretrained(model_name) + self.model.eval() + if self.cuda: + self.model.to(device="cuda") + self.processor = AutoProcessor.from_pretrained(model_name) + self.resampler = torchaudio.transforms.Resample(input_wavs_sr, 16_000) + blank_id = 0 + vocab = list(self.processor.tokenizer.get_vocab().keys()) + for i in range(len(vocab)): + if vocab[i] == "[PAD]" or vocab[i] == "": + blank_id = i + print("Blank Token id [PAD]/", blank_id) + self.blank_id = blank_id + + def speech_file_to_array_fn(self, wav_path): + speech_array, sampling_rate = torchaudio.load(wav_path) + speech = self.resampler(speech_array).squeeze().numpy() + return speech + + def align_single_sample(self, item): + blank_id = self.blank_id + transcript = "|".join(item["sent"].split(" ")) + if not os.path.isfile(item["wav_path"]): + print(item["wav_path"], "not found in wavs directory") + + speech_array = self.speech_file_to_array_fn(item["wav_path"]) + inputs = self.processor(speech_array, sampling_rate=16_000, return_tensors="pt", padding=True) + if self.cuda: + inputs = inputs.to(device="cuda") + + with torch.no_grad(): + logits = self.model(inputs.input_values).logits + + # get the emission probability at frame level + emissions = torch.log_softmax(logits, dim=-1) + emission = emissions[0].cpu().detach() + + # get labels from vocab + labels = ([""] + list(self.processor.tokenizer.get_vocab().keys()))[ + :-1 + ] # logits don't align with the tokenizer's vocab + + dictionary = {c: i for i, c in enumerate(labels)} + tokens = [] + for c in transcript: + if c in dictionary: + tokens.append(dictionary[c]) + + def get_trellis(emission, tokens, blank_id=0): + """ + Build a trellis matrix of shape (num_frames + 1, num_tokens + 1) + that represents the probabilities of each source token being at a certain time step + """ + num_frames = emission.size(0) + num_tokens = len(tokens) + + # Trellis has extra diemsions for both time axis and tokens. + # The extra dim for tokens represents (start-of-sentence) + # The extra dim for time axis is for simplification of the code. + trellis = torch.full((num_frames + 1, num_tokens + 1), -float("inf")) + trellis[:, 0] = 0 + for t in range(num_frames): + trellis[t + 1, 1:] = torch.maximum( + # Score for staying at the same token + trellis[t, 1:] + emission[t, blank_id], + # Score for changing to the next token + trellis[t, :-1] + emission[t, tokens], + ) + return trellis + + trellis = get_trellis(emission, tokens, blank_id) + + @dataclass + class Point: + token_index: int + time_index: int + score: float + + def backtrack(trellis, emission, tokens, blank_id=0): + """ + Walk backwards from the last (sentence_token, time_step) pair to build the optimal sequence alignment path + """ + # Note: + # j and t are indices for trellis, which has extra dimensions + # for time and tokens at the beginning. + # When referring to time frame index `T` in trellis, + # the corresponding index in emission is `T-1`. + # Similarly, when referring to token index `J` in trellis, + # the corresponding index in transcript is `J-1`. + j = trellis.size(1) - 1 + t_start = torch.argmax(trellis[:, j]).item() + + path = [] + for t in range(t_start, 0, -1): + # 1. Figure out if the current position was stay or change + # Note (again): + # `emission[J-1]` is the emission at time frame `J` of trellis dimension. + # Score for token staying the same from time frame J-1 to T. + stayed = trellis[t - 1, j] + emission[t - 1, blank_id] + # Score for token changing from C-1 at T-1 to J at T. + changed = trellis[t - 1, j - 1] + emission[t - 1, tokens[j - 1]] + + # 2. Store the path with frame-wise probability. + prob = emission[t - 1, tokens[j - 1] if changed > stayed else 0].exp().item() + # Return token index and time index in non-trellis coordinate. + path.append(Point(j - 1, t - 1, prob)) + + # 3. Update the token + if changed > stayed: + j -= 1 + if j == 0: + break + else: + raise ValueError("Failed to align") + return path[::-1] + + path = backtrack(trellis, emission, tokens, blank_id) + + @dataclass + class Segment: + label: str + start: int + end: int + score: float + + def __repr__(self): + return f"{self.label}\t{self.score:4.2f}\t{self.start*20:5d}\t{self.end*20:5d}" + + @property + def length(self): + return self.end - self.start + + def merge_repeats(path): + """ + Merge repeated tokens into a single segment. Note: this shouldn't affect repeated characters from the + original sentences (e.g. `ll` in `hello`) + """ + i1, i2 = 0, 0 + segments = [] + while i1 < len(path): + while i2 < len(path) and path[i1].token_index == path[i2].token_index: + i2 += 1 + score = sum(path[k].score for k in range(i1, i2)) / (i2 - i1) + segments.append( + Segment( + transcript[path[i1].token_index], + path[i1].time_index, + path[i2 - 1].time_index + 1, + score, + ) + ) + i1 = i2 + return segments + + segments = merge_repeats(path) + with open(item["out_path"], "w") as out_align: + for seg in segments: + out_align.write(str(seg) + "\n") + + def align_data(self, wav_dir, text_file, output_dir): + + if not os.path.exists(output_dir): + os.makedirs(output_dir) + + # load text file + lines = open(text_file, encoding="utf8").readlines() + + items = [] + for line in lines: + if len(line.strip().split("\t")) != 2: + print("Script must be in format: 00001 this is my sentence") + exit() + + wav_name, sentence = line.strip().split("\t") + wav_path = os.path.join(wav_dir, wav_name + ".wav") + out_path = os.path.join(output_dir, wav_name + ".txt") + + items.append({"sent": sentence, "wav_path": wav_path, "out_path": out_path}) + print("Number of samples found in script file", len(items)) + + for item in tqdm(items): + self.align_single_sample(item) + + +def main(): + parser = argparse.ArgumentParser() + + parser.add_argument( + "--model_name", type=str, default="arijitx/wav2vec2-xls-r-300m-bengali", help="wav2vec model name" + ) + parser.add_argument("--wav_dir", type=str, default="./wavs", help="directory containing wavs") + parser.add_argument("--text_file", type=str, default="script.txt", help="file containing text") + parser.add_argument("--input_wavs_sr", type=int, default=16000, help="sampling rate of input audios") + parser.add_argument( + "--output_dir", type=str, default="./out_alignment", help="output directory containing the alignment files" + ) + parser.add_argument("--cuda", action="store_true") + + args = parser.parse_args() + + aligner = Wav2Vec2Aligner(args.model_name, args.input_wavs_sr, args.cuda) + aligner.align_data(args.wav_dir, args.text_file, args.output_dir) + + +if __name__ == "__main__": + main() diff --git a/examples/research_projects/wav2vec2/run_alignment.sh b/examples/research_projects/wav2vec2/run_alignment.sh new file mode 100644 index 00000000000000..95bfe02cf03745 --- /dev/null +++ b/examples/research_projects/wav2vec2/run_alignment.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +python alignment.py \ +--model_name="arijitx/wav2vec2-xls-r-300m-bengali" \ +--wav_dir="./wavs" \ +--text_file="script.txt" \ +--input_wavs_sr=48000 \ +--output_dir="./out_alignment" \ +--cuda From 88f597ba6a5268dc737358750217eee52e6aef88 Mon Sep 17 00:00:00 2001 From: "Wang, Yi" Date: Tue, 27 Sep 2022 19:51:51 +0800 Subject: [PATCH 393/539] add doc for hyperparameter search (#19192) * add doc for hyperparameter search * update doc --- docs/source/en/_toctree.yml | 2 + docs/source/en/hpo_train.mdx | 120 +++++++++++++++++++++++++++++++++++ 2 files changed, 122 insertions(+) create mode 100644 docs/source/en/hpo_train.mdx diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 223c5d2a6998fc..5e6a0f9a309a60 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -101,6 +101,8 @@ title: Instantiating a big model - local: debugging title: Debugging + - local: hpo_train + title: Hyperparameter Search using Trainer API title: Performance and scalability - sections: - local: contributing diff --git a/docs/source/en/hpo_train.mdx b/docs/source/en/hpo_train.mdx new file mode 100644 index 00000000000000..e9f01fe0905b8b --- /dev/null +++ b/docs/source/en/hpo_train.mdx @@ -0,0 +1,120 @@ + + +# Hyperparameter Search using Trainer API + +🤗 Transformers provides a [`Trainer`] class optimized for training 🤗 Transformers models, making it easier to start training without manually writing your own training loop. The [`Trainer`] provides API for hyperparameter search. This doc shows how to enable it in example. + +## Hyperparameter Search backend + +[`Trainer`] supports four hyperparameter search backends currently: +[optuna](https://optuna.org/), [sigopt](https://sigopt.com/), [raytune](https://docs.ray.io/en/latest/tune/index.html) and [wandb](https://wandb.ai/site/sweeps). + +you should install them before using them as the hyperparameter search backend +```bash +pip install optuna/sigopt/wandb/ray[tune] +``` + +## How to enable Hyperparameter search in example + +Define the hyperparameter search space, different backends need different format. + +For sigopt, see sigopt [object_parameter](https://docs.sigopt.com/ai-module-api-references/api_reference/objects/object_parameter), it's like following: +```py +>>> def sigopt_hp_space(trial): +... return [ +... {"bounds": {"min": 1e-6, "max": 1e-4}, "name": "learning_rate", "type": "double"}, +... { +... "categorical_values": ["16", "32", "64", "128"], +... "name": "per_device_train_batch_size", +... "type": "categorical", +... }, +... ] +``` + +For optuna, see optuna [object_parameter](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/002_configurations.html#sphx-glr-tutorial-10-key-features-002-configurations-py), it's like following: + +```py +>>> def optuna_hp_space(trial): +... return { +... "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True), +... "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [16, 32, 64, 128]), +... } +``` + +For raytune, see raytune [object_parameter](https://docs.ray.io/en/latest/tune/api_docs/search_space.html), it's like following: + +```py +>>> def ray_hp_space(trial): +... return { +... "learning_rate": tune.loguniform(1e-6, 1e-4), +... "per_device_train_batch_size": tune.choice([16, 32, 64, 128]), +... } +``` + +For wandb, see wandb [object_parameter](https://docs.wandb.ai/guides/sweeps/configuration), it's like following: + +```py +>>> def wandb_hp_space(trial): +... return { +... "method": "random", +... "metric": {"name": "objective", "goal": "minimize"}, +... "parameters": { +... "learning_rate": {"distribution": "uniform", "min": 1e-6, "max": 1e-4}, +... "per_device_train_batch_size": {"values": [16, 32, 64, 128]}, +... }, +... } +``` + +Define a `model_init` function and pass it to the [`Trainer`], as an example: +```py +>>> def model_init(trial): +... return AutoModelForSequenceClassification.from_pretrained( +... model_args.model_name_or_path, +... from_tf=bool(".ckpt" in model_args.model_name_or_path), +... config=config, +... cache_dir=model_args.cache_dir, +... revision=model_args.model_revision, +... use_auth_token=True if model_args.use_auth_token else None, +... ) +``` + +Create a [`Trainer`] with your `model_init` function, training arguments, training and test datasets, and evaluation function: + +```py +>>> trainer = Trainer( +... model=None, +... args=training_args, +... train_dataset=small_train_dataset, +... eval_dataset=small_eval_dataset, +... compute_metrics=compute_metrics, +... tokenizer=tokenizer, +... model_init=model_init, +... data_collator=data_collator, +... ) +``` + +Call hyperparameter search, get the best trial parameters, backend could be `"optuna"`/`"sigopt"`/`"wandb"`/`"ray"`. direction can be`"minimize"` or `"maximize"`, which indicates whether to optimize greater or lower objective. + +You could define your own compute_objective function, if not defined, the default compute_objective will be called, and the sum of eval metric like f1 is returned as objective value. + +```py +>>> best_trial = trainer.hyperparameter_search( +... direction="maximize", +... backend="optuna", +... hp_space=optuna_hp_space, +... n_trials=20, +... compute_objective=compute_objective, +... ) +``` + +## Hyperparameter search For DDP refinetune +Currently, Hyperparameter search for DDP is enabled for optuna and sigopt. Only the rank-zero process will generate the search trial and pass the argument to other ranks. \ No newline at end of file From 226b0e46d50dc763dceeeab9682dc83ee70e8c6a Mon Sep 17 00:00:00 2001 From: wangxu Date: Tue, 27 Sep 2022 19:54:05 +0800 Subject: [PATCH 394/539] Add a use_parallel_residual argument to control the residual computing way (#18695) * Add a gpt_j_residual argument to control the residual computing way * Put duplicate code outside of the if block * Rename parameter "gpt_j_residual" to "use_parallel_residual" and set the default value to True --- .../models/gpt_neox/configuration_gpt_neox.py | 5 ++++ .../models/gpt_neox/modeling_gpt_neox.py | 28 +++++++++++++------ 2 files changed, 24 insertions(+), 9 deletions(-) diff --git a/src/transformers/models/gpt_neox/configuration_gpt_neox.py b/src/transformers/models/gpt_neox/configuration_gpt_neox.py index 8e906225c0d18a..3ce1bd78738cef 100644 --- a/src/transformers/models/gpt_neox/configuration_gpt_neox.py +++ b/src/transformers/models/gpt_neox/configuration_gpt_neox.py @@ -66,6 +66,9 @@ class GPTNeoXConfig(PretrainedConfig): use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. + use_parallel_residual (`bool`, *optional*, defaults to `True`): + Whether to use a "parallel" formulation in each Transformer layer, which can provide a slight training + speedup at large scales (e.g. 20B). Example: ```python @@ -99,6 +102,7 @@ def __init__( bos_token_id=0, eos_token_id=2, tie_word_embeddings=False, + use_parallel_residual=True, **kwargs ): super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) @@ -115,3 +119,4 @@ def __init__( self.layer_norm_eps = layer_norm_eps self.use_cache = use_cache self.tie_word_embeddings = tie_word_embeddings + self.use_parallel_residual = use_parallel_residual diff --git a/src/transformers/models/gpt_neox/modeling_gpt_neox.py b/src/transformers/models/gpt_neox/modeling_gpt_neox.py index 4379ff747b3b1a..c1f29381f687f6 100755 --- a/src/transformers/models/gpt_neox/modeling_gpt_neox.py +++ b/src/transformers/models/gpt_neox/modeling_gpt_neox.py @@ -300,6 +300,7 @@ def forward(self, hidden_states): class GPTNeoXLayer(nn.Module): def __init__(self, config): super().__init__() + self.use_parallel_residual = config.use_parallel_residual self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.attention = GPTNeoXAttention(config) @@ -314,28 +315,37 @@ def forward( layer_past=None, output_attentions=False, ): - residual = hidden_states - ln_out = self.input_layernorm(hidden_states) + attention_layer_outputs = self.attention( - ln_out, + self.input_layernorm(hidden_states), attention_mask=attention_mask, layer_past=layer_past, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, ) - attn_output = attention_layer_outputs[0] # output_attn: a, present, (attentions) + attn_output = attention_layer_outputs[0] # output_attn: attn_output, present, (attn_weights) outputs = attention_layer_outputs[1:] - mlp_output = self.mlp(self.post_attention_layernorm(hidden_states)) - hidden_states = mlp_output + attn_output + residual + if self.use_parallel_residual: + # pseudocode: + # x = x + attn(ln1(x)) + mlp(ln2(x)) + mlp_output = self.mlp(self.post_attention_layernorm(hidden_states)) + hidden_states = mlp_output + attn_output + hidden_states + else: + # pseudocode: + # x = x + attn(ln1(x)) + # x = x + mlp(ln2(x)) + attn_output = attn_output + hidden_states + mlp_output = self.mlp(self.post_attention_layernorm(attn_output)) + hidden_states = mlp_output + attn_output if use_cache: - outputs = (hidden_states,) + outputs + outputs = (hidden_states,) + outputs # hidden_states, present, (attn_weights) else: - outputs = (hidden_states,) + outputs[1:] + outputs = (hidden_states,) + outputs[1:] # hidden_states, (attn_weights) - return outputs # hidden_states, present, (attentions) + return outputs GPT_NEOX_START_DOCSTRING = r""" From e3a30e2b99e04e13b8540977775dec2567719e67 Mon Sep 17 00:00:00 2001 From: Nicola Procopio Date: Tue, 27 Sep 2022 14:55:41 +0200 Subject: [PATCH 395/539] translated add_new_pipeline (#19215) --- docs/source/it/_toctree.yml | 3 + docs/source/it/add_new_pipeline.mdx | 246 ++++++++++++++++++++++++++++ 2 files changed, 249 insertions(+) create mode 100644 docs/source/it/add_new_pipeline.mdx diff --git a/docs/source/it/_toctree.yml b/docs/source/it/_toctree.yml index ff0bc964a112cd..afc618dd6faab9 100644 --- a/docs/source/it/_toctree.yml +++ b/docs/source/it/_toctree.yml @@ -36,3 +36,6 @@ - local: debugging title: Debugging title: Guide pratiche +- sections: + - local: add_new_pipeline + title: Come aggiungere una pipeline a 🤗 Transformers? diff --git a/docs/source/it/add_new_pipeline.mdx b/docs/source/it/add_new_pipeline.mdx new file mode 100644 index 00000000000000..cf9acd2902fcfa --- /dev/null +++ b/docs/source/it/add_new_pipeline.mdx @@ -0,0 +1,246 @@ + + +# Come creare una pipeline personalizzata? + +In questa guida, scopriremo come creare una pipeline personalizzata e condividerla sull' [Hub](hf.co/models) o aggiungerla nella libreria +Transformers. + +Innanzitutto, è necessario decidere gli input grezzi che la pipeline sarà in grado di accettare. Possono essere strings, raw bytes, +dictionaries o qualsiasi cosa sia l'input desiderato più probabile. Cerca di mantenere questi input il più possibile in Python +in quanto facilita la compatibilità (anche con altri linguaggi tramite JSON). Questi saranno gli `inputs` della +pipeline (`preprocess`). + +Poi definire gli `outputs`. Stessa strategia degli `inputs`. Più è seplice e meglio è. Questi saranno gli output del metodo +`postprocess`. + +Si parte ereditando la classe base `Pipeline`. con i 4 metodi che bisogna implementare `preprocess`, +`_forward`, `postprocess` e `_sanitize_parameters`. + + +```python +from transformers import Pipeline + + +class MyPipeline(Pipeline): + def _sanitize_parameters(self, **kwargs): + preprocess_kwargs = {} + if "maybe_arg" in kwargs: + preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"] + return preprocess_kwargs, {}, {} + + def preprocess(self, inputs, maybe_arg=2): + model_input = Tensor(inputs["input_ids"]) + return {"model_input": model_input} + + def _forward(self, model_inputs): + # model_inputs == {"model_input": model_input} + outputs = self.model(**model_inputs) + # Maybe {"logits": Tensor(...)} + return outputs + + def postprocess(self, model_outputs): + best_class = model_outputs["logits"].softmax(-1) + return best_class +``` + +La struttura di questa suddivisione consiste nel supportare in modo relativamente continuo CPU/GPU, supportando allo stesso tempo l'esecuzione di +pre/postelaborazione sulla CPU su thread diversi. + +`preprocess` prenderà gli input originariamente definiti e li trasformerà in qualcosa di alimentabile dal modello. Potrebbe +contenere più informazioni e di solito è un `Dict`. + +`_forward` è il dettaglio dell'implementazione e non è destinato a essere chiamato direttamente. `forward` è il metodo preferito per assicurarsi che tutto funzioni correttamente perchè contiene delle slavaguardie. Se qualcosa è +è collegato a un modello reale, appartiene al metodo `_forward`, tutto il resto è nel preprocess/postprocess. + +`postprocess` prende l'otput di `_forward` e lo trasforma nell'output finale che era stato deciso in precedenza. + +`_sanitize_parameters` esiste per consentire agli utenti di passare i parametri ogni volta che desiderano sia a inizialization time `pipeline(...., maybe_arg=4)` che al call time `pipe = pipeline(...); output = pipe(...., maybe_arg=4)`. + +`_sanitize_parameters` ritorna 3 dicts di kwargs che vengono passati direttamente a `preprocess`, +`_forward` e `postprocess`. Non riempire nulla se il chiamante non ha chiamato con alcun parametro aggiuntivo. Questo +consente di mantenere gli argomenti predefiniti nella definizione della funzione, che è sempre più "naturale". + +Un esempio classico potrebbe essere l'argomento `top_k` nel post processing dei classification tasks. + +```python +>>> pipe = pipeline("my-new-task") +>>> pipe("This is a test") +[{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}, {"label": "3-star", "score": 0.05} +{"label": "4-star", "score": 0.025}, {"label": "5-star", "score": 0.025}] + +>>> pipe("This is a test", top_k=2) +[{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}] +``` + +In order to achieve that, we'll update our `postprocess` method with a default parameter to `5`. and edit +`_sanitize_parameters` to allow this new parameter. + + +```python +def postprocess(self, model_outputs, top_k=5): + best_class = model_outputs["logits"].softmax(-1) + # Add logic to handle top_k + return best_class + + +def _sanitize_parameters(self, **kwargs): + preprocess_kwargs = {} + if "maybe_arg" in kwargs: + preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"] + + postprocess_kwargs = {} + if "top_k" in kwargs: + postprocess_kwargs["top_k"] = kwargs["top_k"] + return preprocess_kwargs, {}, postprocess_kwargs +``` + +Cercare di mantenere gli input/output molto semplici e idealmente serializzabili in JSON, in quanto ciò rende l'uso della pipeline molto facile +senza richiedere agli utenti di comprendere nuovi tipi di oggetti. È anche relativamente comune supportare molti tipi di argomenti +per facilitarne l'uso (ad esempio file audio, possono essere nomi di file, URL o byte puri). + +## Aggiungilo alla lista dei tasks supportati + +Per registrar il tuo `new-task` alla lista dei tasks supportati, devi aggiungerlo al `PIPELINE_REGISTRY`: + +```python +from transformers.pipelines import PIPELINE_REGISTRY + +PIPELINE_REGISTRY.register_pipeline( + "new-task", + pipeline_class=MyPipeline, + pt_model=AutoModelForSequenceClassification, +) +``` + +Puoi specificare il modello di default che desideri, in questo caso dovrebbe essere accompagnato da una revisione specifica (che può essere il nome di un branch o l'hash di un commit, in questo caso abbiamo preso `"abcdef"`) e anche dal type: + +```python +PIPELINE_REGISTRY.register_pipeline( + "new-task", + pipeline_class=MyPipeline, + pt_model=AutoModelForSequenceClassification, + default={"pt": ("user/awesome_model", "abcdef")}, + type="text", # current support type: text, audio, image, multimodal +) +``` + +## Condividi la tua pipeline sull'Hub + +Per condividere la tua pipeline personalizzata sull'Hub, devi solo salvare il codice della tua sottoclasse `Pipeline` in un file +python. Per esempio, supponiamo di voler utilizzare una pipeline personalizzata per la classificazione delle coppie di frasi come la seguente: + +```py +import numpy as np + +from transformers import Pipeline + + +def softmax(outputs): + maxes = np.max(outputs, axis=-1, keepdims=True) + shifted_exp = np.exp(outputs - maxes) + return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True) + + +class PairClassificationPipeline(Pipeline): + def _sanitize_parameters(self, **kwargs): + preprocess_kwargs = {} + if "second_text" in kwargs: + preprocess_kwargs["second_text"] = kwargs["second_text"] + return preprocess_kwargs, {}, {} + + def preprocess(self, text, second_text=None): + return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework) + + def _forward(self, model_inputs): + return self.model(**model_inputs) + + def postprocess(self, model_outputs): + logits = model_outputs.logits[0].numpy() + probabilities = softmax(logits) + + best_class = np.argmax(probabilities) + label = self.model.config.id2label[best_class] + score = probabilities[best_class].item() + logits = logits.tolist() + return {"label": label, "score": score, "logits": logits} +``` + +L'implementazione è agnostica al framework, e lavorerà sia con modelli PyTorch che con TensorFlow. Se l'abbiamo salvato in un file chiamato `pair_classification.py`, può essere successivamente importato e registrato in questo modo: + +```py +from pair_classification import PairClassificationPipeline +from transformers.pipelines import PIPELINE_REGISTRY +from transformers import AutoModelForSequenceClassification, TFAutoModelForSequenceClassification + +PIPELINE_REGISTRY.register_pipeline( + "pair-classification", + pipeline_class=PairClassificationPipeline, + pt_model=AutoModelForSequenceClassification, + tf_model=TFAutoModelForSequenceClassification, +) +``` + +Una volta fatto, possiamo usarla con un modello pretrained. L'istanza `sgugger/finetuned-bert-mrpc` è stata +fine-tuned sul dataset MRPC, che classifica le coppie di frasi come parafrasi o no. + +```py +from transformers import pipeline + +classifier = pipeline("pair-classification", model="sgugger/finetuned-bert-mrpc") +``` + +Successivamente possiamo condividerlo sull'Hub usando il metodo `save_pretrained` in un `Repository`: + +```py +from huggingface_hub import Repository + +repo = Repository("test-dynamic-pipeline", clone_from="{your_username}/test-dynamic-pipeline") +classifier.save_pretrained("test-dynamic-pipeline") +repo.push_to_hub() +``` + +Questo codice copierà il file dove è stato definitp `PairClassificationPipeline` all'interno della cartella `"test-dynamic-pipeline"`, +insieme al salvataggio del modello e del tokenizer della pipeline, prima di pushare il tutto nel repository +`{your_username}/test-dynamic-pipeline`. Dopodiché chiunque potrà utilizzarlo, purché fornisca l'opzione +`trust_remote_code=True`: + +```py +from transformers import pipeline + +classifier = pipeline(model="{your_username}/test-dynamic-pipeline", trust_remote_code=True) +``` + +## Aggiungere la pipeline a Transformers + +Se vuoi contribuire con la tua pipeline a Transformers, dovrai aggiungere un modulo nel sottomodulo `pipelines` +con il codice della tua pipeline, quindi aggiungilo all'elenco dei tasks definiti in `pipelines/__init__.py`. + +Poi hai bisogno di aggiungere i test. Crea un nuovo file `tests/test_pipelines_MY_PIPELINE.py` con esempi ed altri test. + +La funzione `run_pipeline_test` sarà molto generica e su piccoli modelli casuali su ogni possibile +architettura, come definito da `model_mapping` e `tf_model_mapping`. + +Questo è molto importante per testare la compatibilità futura, nel senso che se qualcuno aggiunge un nuovo modello di +`XXXForQuestionAnswering` allora il test della pipeline tenterà di essere eseguito su di esso. Poiché i modelli sono casuali, è +è impossibile controllare i valori effettivi, per questo esiste un aiuto `ANY` che tenterà solamente di far corrispondere l'output della pipeline TYPE. + +Hai anche *bisogno* di implementare 2 (idealmente 4) test. + +- `test_small_model_pt` : Definire 1 piccolo modello per questa pipeline (non importa se i risultati non hanno senso) + e testare i risultati della pipeline. I risultati dovrebbero essere gli stessi di `test_small_model_tf`. +- `test_small_model_tf` : Definire 1 piccolo modello per questa pipeline (non importa se i risultati non hanno senso) + e testare i risultati della pipeline. I risultati dovrebbero essere gli stessi di `test_small_model_pt`. +- `test_large_model_pt` (`optional`): Testare la pipeline su una pipeline reale in cui i risultati dovrebbero avere + senso. Questi test sono lenti e dovrebbero essere contrassegnati come tali. In questo caso l'obiettivo è mostrare la pipeline e assicurarsi che non ci siano derive nelle versioni future +- `test_large_model_tf` (`optional`): Testare la pipeline su una pipeline reale in cui i risultati dovrebbero avere + senso. Questi test sono lenti e dovrebbero essere contrassegnati come tali. In questo caso l'obiettivo è mostrare la pipeline e assicurarsi + che non ci siano derive nelle versioni future \ No newline at end of file From 34be08efcd4d318785b3eac592f27a3d5dd2144b Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Tue, 27 Sep 2022 09:36:34 -0400 Subject: [PATCH 396/539] More tests for regression in cached non existence (#19216) * More tests for regression in cached non existence * Style --- tests/test_tokenization_common.py | 21 ++++++++++++++++++++- tests/utils/test_hub_utils.py | 15 +++++++++++++++ 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/tests/test_tokenization_common.py b/tests/test_tokenization_common.py index 48add3f4f9ce7d..537f5fb9bdfaf3 100644 --- a/tests/test_tokenization_common.py +++ b/tests/test_tokenization_common.py @@ -40,6 +40,7 @@ AutoTokenizer, BertTokenizer, BertTokenizerFast, + GPT2TokenizerFast, PreTrainedTokenizer, PreTrainedTokenizerBase, PreTrainedTokenizerFast, @@ -3884,12 +3885,30 @@ def test_cached_files_are_used_when_internet_is_down(self): # Download this model to make sure it's in the cache. _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") - # Under the mock environment we get a 500 error when trying to reach the model. + # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.request", return_value=response_mock) as mock_head: _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") # This check we did call the fake head request mock_head.assert_called() + @require_tokenizers + def test_cached_files_are_used_when_internet_is_down_missing_files(self): + # A mock response for an HTTP head request to emulate server down + response_mock = mock.Mock() + response_mock.status_code = 500 + response_mock.headers = {} + response_mock.raise_for_status.side_effect = HTTPError + response_mock.json.return_value = {} + + # Download this model to make sure it's in the cache. + _ = GPT2TokenizerFast.from_pretrained("gpt2") + + # Under the mock environment we get a 500 error when trying to reach the tokenizer. + with mock.patch("requests.request", return_value=response_mock) as mock_head: + _ = GPT2TokenizerFast.from_pretrained("gpt2") + # This check we did call the fake head request + mock_head.assert_called() + def test_legacy_load_from_one_file(self): # This test is for deprecated behavior and can be removed in v5 try: diff --git a/tests/utils/test_hub_utils.py b/tests/utils/test_hub_utils.py index f55a0ae4315c4a..c8c7d0faad70c1 100644 --- a/tests/utils/test_hub_utils.py +++ b/tests/utils/test_hub_utils.py @@ -15,8 +15,10 @@ import os import tempfile import unittest +import unittest.mock as mock from pathlib import Path +from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, @@ -79,6 +81,19 @@ def test_non_existence_is_cached(self): path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False) self.assertIsNone(path) + response_mock = mock.Mock() + response_mock.status_code = 500 + response_mock.headers = {} + response_mock.raise_for_status.side_effect = HTTPError + response_mock.json.return_value = {} + + # Under the mock environment we get a 500 error when trying to reach the tokenizer. + with mock.patch("requests.request", return_value=response_mock) as mock_head: + path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False) + self.assertIsNone(path) + # This check we did call the fake head request + mock_head.assert_called() + def test_has_file(self): self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME)) self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME)) From 2d956958252617a178a68a06582c99b133fe7d3d Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Tue, 27 Sep 2022 17:30:58 +0200 Subject: [PATCH 397/539] Use `math.pi` instead of `torch.pi` in `MaskFormer` (#19201) * Use math.pi Co-authored-by: ydshieh --- src/transformers/models/maskformer/modeling_maskformer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/models/maskformer/modeling_maskformer.py b/src/transformers/models/maskformer/modeling_maskformer.py index 110b11c6532dd7..1b37fdd27c568b 100644 --- a/src/transformers/models/maskformer/modeling_maskformer.py +++ b/src/transformers/models/maskformer/modeling_maskformer.py @@ -2102,7 +2102,7 @@ def __init__( self.num_pos_feats = num_pos_feats self.temperature = temperature self.normalize = normalize - self.scale = 2 * torch.pi if scale is None else scale + self.scale = 2 * math.pi if scale is None else scale def forward(self, x: Tensor, mask: Optional[Tensor] = None) -> Tensor: if mask is None: From 2df602870b9a7403109b470322926b48e366e3d1 Mon Sep 17 00:00:00 2001 From: IMvision12 <88665786+IMvision12@users.noreply.github.com> Date: Wed, 28 Sep 2022 01:55:57 +0530 Subject: [PATCH 398/539] Added tests for yaml and json parser (#19219) * Added tests for yaml and json * Added tests for yaml and json --- src/transformers/hf_argparser.py | 6 +++-- tests/utils/test_hf_argparser.py | 42 ++++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 2 deletions(-) diff --git a/src/transformers/hf_argparser.py b/src/transformers/hf_argparser.py index b74db2ee4ea1d4..06a10ff5a0554b 100644 --- a/src/transformers/hf_argparser.py +++ b/src/transformers/hf_argparser.py @@ -281,7 +281,9 @@ def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tup - the dataclass instances in the same order as they were passed to the initializer. """ - outputs = self.parse_dict(json.loads(Path(json_file).read_text()), allow_extra_keys=allow_extra_keys) + open_json_file = open(Path(json_file)) + data = json.loads(open_json_file.read()) + outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys) return tuple(outputs) def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]: @@ -301,5 +303,5 @@ def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tup - the dataclass instances in the same order as they were passed to the initializer. """ - outputs = self.parse_dict(yaml.safe_load(yaml_file), allow_extra_keys=allow_extra_keys) + outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys) return tuple(outputs) diff --git a/tests/utils/test_hf_argparser.py b/tests/utils/test_hf_argparser.py index 827888509bb36b..9dfff75948cde6 100644 --- a/tests/utils/test_hf_argparser.py +++ b/tests/utils/test_hf_argparser.py @@ -13,12 +13,17 @@ # limitations under the License. import argparse +import json +import os +import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum +from pathlib import Path from typing import List, Optional +import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import string_to_bool @@ -258,6 +263,43 @@ def test_parse_dict_extra_key(self): self.assertRaises(ValueError, parser.parse_dict, args_dict, allow_extra_keys=False) + def test_parse_json(self): + parser = HfArgumentParser(BasicExample) + + args_dict_for_json = { + "foo": 12, + "bar": 3.14, + "baz": "42", + "flag": True, + } + with tempfile.TemporaryDirectory() as tmp_dir: + temp_local_path = os.path.join(tmp_dir, "temp_json") + os.mkdir(temp_local_path) + with open(temp_local_path + ".json", "w+") as f: + json.dump(args_dict_for_json, f) + parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".json"))[0] + + args = BasicExample(**args_dict_for_json) + self.assertEqual(parsed_args, args) + + def test_parse_yaml(self): + parser = HfArgumentParser(BasicExample) + + args_dict_for_yaml = { + "foo": 12, + "bar": 3.14, + "baz": "42", + "flag": True, + } + with tempfile.TemporaryDirectory() as tmp_dir: + temp_local_path = os.path.join(tmp_dir, "temp_yaml") + os.mkdir(temp_local_path) + with open(temp_local_path + ".yaml", "w+") as f: + yaml.dump(args_dict_for_yaml, f) + parsed_args = parser.parse_yaml_file(Path(temp_local_path + ".yaml"))[0] + args = BasicExample(**args_dict_for_yaml) + self.assertEqual(parsed_args, args) + def test_integration_training_args(self): parser = HfArgumentParser(TrainingArguments) self.assertIsNotNone(parser) From 942fa8ced860775e4fad556f5bc4fb99b1eef618 Mon Sep 17 00:00:00 2001 From: Ankur Goyal Date: Wed, 28 Sep 2022 05:03:20 -0700 Subject: [PATCH 399/539] Fix small use_cache typo in the docs (#19191) --- src/transformers/generation_tf_utils.py | 2 +- src/transformers/generation_utils.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/transformers/generation_tf_utils.py b/src/transformers/generation_tf_utils.py index 30319dcce389ed..ed2d7f33bfa1b1 100644 --- a/src/transformers/generation_tf_utils.py +++ b/src/transformers/generation_tf_utils.py @@ -475,7 +475,7 @@ def generate( [What are attention masks?](../glossary#attention-mask) decoder_start_token_id (`int`, *optional*): If an encoder-decoder model starts decoding with a different token than *bos*, the id of that token. - use_cache: (`bool`, *optional*, defaults to `True`): + use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding. output_attentions (`bool`, *optional*, defaults to `False`): diff --git a/src/transformers/generation_utils.py b/src/transformers/generation_utils.py index 1509e92b2d3f12..0f9e8531670549 100644 --- a/src/transformers/generation_utils.py +++ b/src/transformers/generation_utils.py @@ -1033,7 +1033,7 @@ def generate( as `input_ids` that masks the pad token. [What are attention masks?](../glossary#attention-mask) decoder_start_token_id (`int`, *optional*): If an encoder-decoder model starts decoding with a different token than *bos*, the id of that token. - use_cache: (`bool`, *optional*, defaults to `True`): + use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding. num_beam_groups (`int`, *optional*, defaults to `model.config.num_beam_groups` or 1 if the config does not set any value): From a357ed50e7144d6910dd924cb334518e90e59392 Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Wed, 28 Sep 2022 13:07:08 +0100 Subject: [PATCH 400/539] Generate: add warning when left padding should be used (#19067) * add warning when left padding should be used * PT: check for pad token; FLAX: can only check while not tracing --- src/transformers/generation_flax_utils.py | 8 ++++++++ src/transformers/generation_tf_utils.py | 8 ++++++++ src/transformers/generation_utils.py | 8 ++++++++ 3 files changed, 24 insertions(+) diff --git a/src/transformers/generation_flax_utils.py b/src/transformers/generation_flax_utils.py index 51d00efc106855..fd362f33d3627f 100644 --- a/src/transformers/generation_flax_utils.py +++ b/src/transformers/generation_flax_utils.py @@ -326,6 +326,14 @@ def generate( if decoder_start_token_id is None and self.config.is_encoder_decoder: raise ValueError("`decoder_start_token_id` has to be defined for encoder-decoder generation.") + # decoder-only models should use left-padding for generation (can't be checked with `trace=True`) + if not self.config.is_encoder_decoder and not trace: + if pad_token_id is not None and jnp.sum(input_ids[:, -1] == pad_token_id) > 0: + logger.warning( + "A decoder-only architecture is being used, but right-padding was detected! For correct " + "generation results, please set `padding_side='left'` when initializing the tokenizer." + ) + if self.config.is_encoder_decoder: # add encoder_outputs to model_kwargs if model_kwargs.get("encoder_outputs") is None: diff --git a/src/transformers/generation_tf_utils.py b/src/transformers/generation_tf_utils.py index ed2d7f33bfa1b1..4d39a81b7b7236 100644 --- a/src/transformers/generation_tf_utils.py +++ b/src/transformers/generation_tf_utils.py @@ -1605,6 +1605,14 @@ def _generate( input_ids, pad_token_id, eos_token_id ) + # decoder-only models should use left-padding for generation + if not self.config.is_encoder_decoder: + if pad_token_id is not None and tf.math.reduce_any(input_ids[:, -1] == pad_token_id): + logger.warning( + "A decoder-only architecture is being used, but right-padding was detected! For correct " + "generation results, please set `padding_side='left'` when initializing the tokenizer." + ) + # 4. Prepare model inputs which will be used for auto-regressive generation if self.config.is_encoder_decoder: # if encoder-decoder, we create encoder_outputs and add to `model_kwargs` diff --git a/src/transformers/generation_utils.py b/src/transformers/generation_utils.py index 0f9e8531670549..ac2d031cc18310 100644 --- a/src/transformers/generation_utils.py +++ b/src/transformers/generation_utils.py @@ -1229,6 +1229,14 @@ def generate( inputs_tensor, pad_token_id, eos_token_id ) + # decoder-only models should use left-padding for generation + if not self.config.is_encoder_decoder: + if pad_token_id is not None and torch.sum(inputs_tensor[:, -1] == pad_token_id) > 0: + logger.warning( + "A decoder-only architecture is being used, but right-padding was detected! For correct " + "generation results, please set `padding_side='left'` when initializing the tokenizer." + ) + if self.config.is_encoder_decoder and "encoder_outputs" not in model_kwargs: # if model is encoder decoder encoder_outputs are created # and added to `model_kwargs` From 22d37a9d2c685cc0d1ca33903fa9f00ca53a56a1 Mon Sep 17 00:00:00 2001 From: Gabriel Luiz Freitas Almeida Date: Wed, 28 Sep 2022 09:57:43 -0300 Subject: [PATCH 401/539] Fix deprecation warning for return_all_scores (#19217) * Improve deprecation warning for return_all_scores * Fix formatting --- src/transformers/pipelines/text_classification.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/transformers/pipelines/text_classification.py b/src/transformers/pipelines/text_classification.py index dd8de4c7357f2a..51eb6b36f5323b 100644 --- a/src/transformers/pipelines/text_classification.py +++ b/src/transformers/pipelines/text_classification.py @@ -87,7 +87,9 @@ def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, t postprocess_params["_legacy"] = False elif return_all_scores is not None: warnings.warn( - "`return_all_scores` is now deprecated, use `top_k=1` if you want similar functionnality", UserWarning + "`return_all_scores` is now deprecated, if want a similar funcionality use `top_k=None` instead of" + " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.", + UserWarning, ) if return_all_scores: postprocess_params["top_k"] = None From de359c459397f0944587982dfcbaf2fa469343e2 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Wed, 28 Sep 2022 15:53:21 +0200 Subject: [PATCH 402/539] Fix doctest for `TFDeiTForImageClassification` (#19173) * Fix doctest for TFDeiTForImageClassification * Remove unnecessary tf.random.set_seed Co-authored-by: ydshieh --- src/transformers/models/deit/modeling_tf_deit.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/transformers/models/deit/modeling_tf_deit.py b/src/transformers/models/deit/modeling_tf_deit.py index ac1cc13e96470e..b7270abb655e7a 100644 --- a/src/transformers/models/deit/modeling_tf_deit.py +++ b/src/transformers/models/deit/modeling_tf_deit.py @@ -922,7 +922,7 @@ def call( >>> from PIL import Image >>> import requests - >>> tf.random.set_seed(3) # doctest: +IGNORE_RESULT + >>> tf.keras.utils.set_random_seed(3) # doctest: +IGNORE_RESULT >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) @@ -937,7 +937,7 @@ def call( >>> # model predicts one of the 1000 ImageNet classes >>> predicted_class_idx = tf.math.argmax(logits, axis=-1)[0] >>> print("Predicted class:", model.config.id2label[int(predicted_class_idx)]) - Predicted class: ptarmigan + Predicted class: little blue heron, Egretta caerulea ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict From 9c6aeba3535898d06ffdbd9fff7fca093ec62fc2 Mon Sep 17 00:00:00 2001 From: Nick Doiron Date: Wed, 28 Sep 2022 10:45:05 -0400 Subject: [PATCH 403/539] Document and validate typical_p in generation (#19128) * Document and validate typical_p in generation --- src/transformers/generation_logits_process.py | 13 +++++++++++++ src/transformers/generation_utils.py | 3 +++ 2 files changed, 16 insertions(+) diff --git a/src/transformers/generation_logits_process.py b/src/transformers/generation_logits_process.py index 35ca6c57311d63..0e914940546c3b 100644 --- a/src/transformers/generation_logits_process.py +++ b/src/transformers/generation_logits_process.py @@ -236,6 +236,19 @@ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> to class TypicalLogitsWarper(LogitsWarper): + r""" + [`LogitsWarper`] that performs typical decoding. See [Typical Decoding for Natural Language + Generation](https://arxiv.org/abs/2202.00666) for more information. + + Args: + mass (`float`): + Value of typical_p between 0 and 1 inclusive, defaults to 0.9. + filter_value (`float`, *optional*, defaults to `-float("Inf")`): + All filtered values will be set to this float value. + min_tokens_to_keep (`int`, *optional*, defaults to 1): + Minimum number of tokens that cannot be filtered. + """ + def __init__(self, mass: float = 0.9, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): mass = float(mass) if not (mass > 0 and mass < 1): diff --git a/src/transformers/generation_utils.py b/src/transformers/generation_utils.py index ac2d031cc18310..71db5532ea38de 100644 --- a/src/transformers/generation_utils.py +++ b/src/transformers/generation_utils.py @@ -1486,6 +1486,9 @@ def generate( if stopping_criteria.max_length is None: raise ValueError("`max_length` needs to be a stopping_criteria for now.") + if typical_p is not None: + raise ValueError("Decoder argument `typical_p` is not supported with beam groups.") + # 10. prepare beam search scorer beam_scorer = BeamSearchScorer( batch_size=batch_size, From 4a0b958d61f2c99a1cfb3b0d146596efafa9aa58 Mon Sep 17 00:00:00 2001 From: Tatsuki Okada <92259109+iamtatsuki05@users.noreply.github.com> Date: Wed, 28 Sep 2022 23:55:46 +0900 Subject: [PATCH 404/539] Fix trainer seq2seq qa.py evaluate log and ft script (#19208) * fix args option * fix trainer eval log * fix out of memory qa script * do isort, black, flake * fix tokenize target * take it back. * fix: comment --- .../question-answering/run_seq2seq_qa.py | 21 +++++++++++++----- .../question-answering/trainer_seq2seq_qa.py | 22 ++++++++++++++----- 2 files changed, 32 insertions(+), 11 deletions(-) diff --git a/examples/pytorch/question-answering/run_seq2seq_qa.py b/examples/pytorch/question-answering/run_seq2seq_qa.py index 078b58dfdf0ec4..4f2faeecbc1bd1 100644 --- a/examples/pytorch/question-answering/run_seq2seq_qa.py +++ b/examples/pytorch/question-answering/run_seq2seq_qa.py @@ -327,21 +327,28 @@ def main(): if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( - data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir + data_args.dataset_name, + data_args.dataset_config_name, + cache_dir=model_args.cache_dir, + use_auth_token=True if model_args.use_auth_token else None, ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] - if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] if data_args.test_file is not None: data_files["test"] = data_args.test_file extension = data_args.test_file.split(".")[-1] - raw_datasets = load_dataset(extension, data_files=data_files, field="data", cache_dir=model_args.cache_dir) + raw_datasets = load_dataset( + extension, + data_files=data_files, + cache_dir=model_args.cache_dir, + use_auth_token=True if model_args.use_auth_token else None, + ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. @@ -359,7 +366,7 @@ def main(): tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, - use_fast=True, + use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) @@ -476,9 +483,10 @@ def preprocess_validation_function(examples): max_length=max_seq_length, padding=padding, truncation=True, - return_overflowing_tokens=True, return_offsets_mapping=True, + return_overflowing_tokens=True, ) + # Tokenize targets with the `text_target` keyword argument labels = tokenizer(text_target=targets, max_length=max_answer_length, padding=padding, truncation=True) @@ -503,6 +511,7 @@ def preprocess_validation_function(examples): ] model_inputs["labels"] = labels["input_ids"] + return model_inputs if training_args.do_train: @@ -627,7 +636,7 @@ def post_processing_function( eval_examples=eval_examples if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, - compute_metrics=compute_metrics, + compute_metrics=compute_metrics if training_args.predict_with_generate else None, post_process_function=post_processing_function, ) diff --git a/examples/pytorch/question-answering/trainer_seq2seq_qa.py b/examples/pytorch/question-answering/trainer_seq2seq_qa.py index 6ad66aeec5b40b..ab46435062f9eb 100644 --- a/examples/pytorch/question-answering/trainer_seq2seq_qa.py +++ b/examples/pytorch/question-answering/trainer_seq2seq_qa.py @@ -15,12 +15,14 @@ """ A subclass of `Trainer` specific to Question-Answering tasks """ +import math +import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import Seq2SeqTrainer, is_torch_tpu_available -from transformers.trainer_utils import PredictionOutput +from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): @@ -59,6 +61,7 @@ def evaluate( # Temporarily disable metric computation, we will do it in the loop here. compute_metrics = self.compute_metrics self.compute_metrics = None + start_time = time.time() eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: output = eval_loop( @@ -71,6 +74,15 @@ def evaluate( ) finally: self.compute_metrics = compute_metrics + total_batch_size = self.args.eval_batch_size * self.args.world_size + output.metrics.update( + speed_metrics( + metric_key_prefix, + start_time, + num_samples=output.num_samples, + num_steps=math.ceil(output.num_samples / total_batch_size), + ) + ) if self.post_process_function is not None and self.compute_metrics is not None: eval_preds = self.post_process_function(eval_examples, eval_dataset, output) @@ -81,15 +93,15 @@ def evaluate( if not key.startswith(f"{metric_key_prefix}_"): metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key) - self.log(metrics) - else: - metrics = {} + output.metrics.update(metrics) + + self.log(metrics) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report()) - self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics) + self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics) return metrics def predict( From 64998a57fbb4d6069c1ce76d7f4c1bf0cd348220 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Wed, 28 Sep 2022 18:26:12 +0200 Subject: [PATCH 405/539] Fix cache names in CircleCI jobs (#19223) Co-authored-by: ydshieh --- .circleci/config.yml | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index e911a2f4d0e486..0143cdeb77a5e0 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -128,7 +128,7 @@ jobs: - restore_cache: keys: - v0.5-torch_and_tf-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} + - v0.5-torch_and_tf- - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng git-lfs - run: git lfs install - run: pip install --upgrade pip @@ -138,7 +138,7 @@ jobs: - run: pip install https://github.com/kpu/kenlm/archive/master.zip - run: pip install git+https://github.com/huggingface/accelerate - save_cache: - key: v0.5-{{ checksum "setup.py" }} + key: v0.5-torch_and_tf-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_tf $(cat test_preparation/test_list.txt) -m is_pt_tf_cross_test --durations=0 | tee tests_output.txt @@ -170,7 +170,7 @@ jobs: - restore_cache: keys: - v0.5-torch_and_flax-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} + - v0.5-torch_and_flax- - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng - run: pip install --upgrade pip - run: pip install .[sklearn,flax,torch,testing,sentencepiece,torch-speech,vision] @@ -178,7 +178,7 @@ jobs: - run: pip install https://github.com/kpu/kenlm/archive/master.zip - run: pip install git+https://github.com/huggingface/accelerate - save_cache: - key: v0.5-{{ checksum "setup.py" }} + key: v0.5-torch_and_flax-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_flax $(cat test_preparation/test_list.txt) -m is_pt_flax_cross_test --durations=0 | tee tests_output.txt @@ -209,7 +209,7 @@ jobs: - restore_cache: keys: - v0.5-torch-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} + - v0.5-torch- - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng time - run: pip install --upgrade pip - run: pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm] @@ -248,7 +248,7 @@ jobs: - restore_cache: keys: - v0.5-tf-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} + - v0.5-tf- - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng - run: pip install --upgrade pip - run: pip install .[sklearn,tf-cpu,testing,sentencepiece,tf-speech,vision] @@ -286,7 +286,7 @@ jobs: - restore_cache: keys: - v0.5-flax-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} + - v0.5-flax- - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng - run: pip install --upgrade pip - run: pip install .[flax,testing,sentencepiece,flax-speech,vision] @@ -324,7 +324,7 @@ jobs: - restore_cache: keys: - v0.5-torch-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} + - v0.5-torch- - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng - run: pip install --upgrade pip - run: pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm] @@ -363,7 +363,7 @@ jobs: - restore_cache: keys: - v0.5-tf-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} + - v0.5-tf- - run: pip install --upgrade pip - run: pip install .[sklearn,tf-cpu,testing,sentencepiece] - run: pip install tensorflow_probability @@ -397,7 +397,7 @@ jobs: - restore_cache: keys: - v0.5-custom_tokenizers-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} + - v0.5-custom_tokenizers- - run: pip install --upgrade pip - run: pip install .[ja,testing,sentencepiece,jieba,spacy,ftfy,rjieba] - run: python -m unidic download @@ -433,7 +433,7 @@ jobs: - restore_cache: keys: - v0.5-torch_examples-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} + - v0.5-torch_examples- - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng - run: pip install --upgrade pip - run: pip install .[sklearn,torch,sentencepiece,testing,torch-speech] @@ -470,7 +470,7 @@ jobs: - restore_cache: keys: - v0.5-tensorflow_examples-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} + - v0.5-tensorflow_examples- - run: pip install --upgrade pip - run: pip install .[sklearn,tensorflow,sentencepiece,testing] - run: pip install -r examples/tensorflow/_tests_requirements.txt @@ -506,7 +506,7 @@ jobs: - restore_cache: keys: - v0.5-flax_examples-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} + - v0.5-flax_examples- - run: pip install --upgrade pip - run: pip install .[flax,testing,sentencepiece] - run: pip install -r examples/flax/_tests_requirements.txt @@ -543,7 +543,7 @@ jobs: - restore_cache: keys: - v0.5-hub-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} + - v0.5-hub- - run: sudo apt-get -y update && sudo apt-get install git-lfs - run: | git config --global user.email "ci@dummy.com" @@ -581,8 +581,8 @@ jobs: fi - restore_cache: keys: - - v0.5-torch-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} + - v0.5-onnx-{{ checksum "setup.py" }} + - v0.5-onnx- - run: pip install --upgrade pip - run: pip install .[torch,tf,testing,sentencepiece,onnxruntime,vision,rjieba] - save_cache: @@ -610,7 +610,7 @@ jobs: - restore_cache: keys: - v0.5-code_quality-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} + - v0.5-code_quality- - run: pip install --upgrade pip - run: pip install .[all,quality] - save_cache: @@ -639,7 +639,7 @@ jobs: - restore_cache: keys: - v0.5-repository_consistency-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} + - v0.5-repository_consistency- - run: pip install --upgrade pip - run: pip install .[all,quality] - save_cache: @@ -678,7 +678,7 @@ jobs: - restore_cache: keys: - v0.5-torch-{{ checksum "setup.py" }} - - v0.5-{{ checksum "setup.py" }} + - v0.5-torch- - run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev - run: pip install --upgrade pip - run: pip install .[torch,testing,vision] From 0fc68a7e14b1e6450829e7be76f74abbc84f051e Mon Sep 17 00:00:00 2001 From: Sylvain Gugger Date: Wed, 28 Sep 2022 15:45:49 -0400 Subject: [PATCH 406/539] Fix seq2seq QA example --- examples/pytorch/question-answering/run_seq2seq_qa.py | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/pytorch/question-answering/run_seq2seq_qa.py b/examples/pytorch/question-answering/run_seq2seq_qa.py index 4f2faeecbc1bd1..abcc11fbb3edb7 100644 --- a/examples/pytorch/question-answering/run_seq2seq_qa.py +++ b/examples/pytorch/question-answering/run_seq2seq_qa.py @@ -346,6 +346,7 @@ def main(): raw_datasets = load_dataset( extension, data_files=data_files, + field="data", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) From 990936a8689e4a129690ca31177d342f7dabbadb Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Wed, 28 Sep 2022 17:09:29 -0700 Subject: [PATCH 407/539] Move AutoClasses under Main Classes (#19163) * move autoclasses to main classes * keep auto.mdx in model_doc --- docs/source/en/_toctree.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 5e6a0f9a309a60..5e2d25ee3c4d11 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -145,6 +145,8 @@ title: Conceptual guides - sections: - sections: + - local: model_doc/auto + title: Auto Classes - local: main_classes/callback title: Callbacks - local: main_classes/configuration @@ -179,8 +181,6 @@ title: Feature Extractor title: Main Classes - sections: - - local: model_doc/auto - title: Auto Classes - isExpanded: false sections: - local: model_doc/albert From 6957350c2ba26eaeff3f5f0c6cc92fba65dbdd57 Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Wed, 28 Sep 2022 17:09:44 -0700 Subject: [PATCH 408/539] Focus doc around preprocessing classes (#18768) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * 📝 reframe docs around preprocessing classes * small edits * edits and review * fix typo * apply review * clarify processor --- docs/source/en/preprocessing.mdx | 150 ++++++++++++++----------------- 1 file changed, 69 insertions(+), 81 deletions(-) diff --git a/docs/source/en/preprocessing.mdx b/docs/source/en/preprocessing.mdx index d710d41f660286..de0eed152d9bdf 100644 --- a/docs/source/en/preprocessing.mdx +++ b/docs/source/en/preprocessing.mdx @@ -14,17 +14,29 @@ specific language governing permissions and limitations under the License. [[open-in-colab]] -Before you can use your data in a model, the data needs to be processed into an acceptable format for the model. A model does not understand raw text, images or audio. These inputs need to be converted into numbers and assembled into tensors. In this tutorial, you will: +Before you can train a model on a dataset, it needs to be preprocessed into the expected model input format. Whether your data is text, images, or audio, they need to be converted and assembled into batches of tensors. 🤗 Transformers provides a set of preprocessing classes to help prepare your data for the model. In this tutorial, you'll learn that for: -* Preprocess textual data with a tokenizer. -* Preprocess image or audio data with a feature extractor. -* Preprocess data for a multimodal task with a processor. +* Text, use a [Tokenizer](./main_classes/tokenizer) to convert text into a sequence of tokens, create a numerical representation of the tokens, and assemble them into tensors. +* Computer vision and speech, use a [Feature extractor](./main_classes/feature_extractor) to extract sequential features from audio waveforms and images and convert them into tensors. +* Multimodal inputs, use a [Processor](./main_classes/processors) to combine a tokenizer and a feature extractor. -## NLP + + +`AutoProcessor` **always** works and automatically chooses the correct class for the model you're using, whether you're using a tokenizer, feature extractor or processor. + + + +Before you begin, install 🤗 Datasets so you can load some datasets to experiment with: + +```bash +pip install datasets +``` + +## Natural Language Processing -The main tool for processing textual data is a [tokenizer](main_classes/tokenizer). A tokenizer starts by splitting text into *tokens* according to a set of rules. The tokens are converted into numbers, which are used to build tensors as input to a model. Any additional inputs required by a model are also added by the tokenizer. +The main tool for preprocessing textual data is a [tokenizer](main_classes/tokenizer). A tokenizer splits text into *tokens* according to a set of rules. The tokens are converted into numbers and then tensors, which become the model inputs. Any additional inputs required by the model are added by the tokenizer. @@ -32,11 +44,7 @@ If you plan on using a pretrained model, it's important to use the associated pr -Get started quickly by loading a pretrained tokenizer with the [`AutoTokenizer`] class. This downloads the *vocab* used when a model is pretrained. - -### Tokenize - -Load a pretrained tokenizer with [`AutoTokenizer.from_pretrained`]: +Get started by loading a pretrained tokenizer with the [`AutoTokenizer.from_pretrained`] method. This downloads the *vocab* a model was pretrained with: ```py >>> from transformers import AutoTokenizer @@ -44,7 +52,7 @@ Load a pretrained tokenizer with [`AutoTokenizer.from_pretrained`]: >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") ``` -Then pass your sentence to the tokenizer: +Then pass your text to the tokenizer: ```py >>> encoded_input = tokenizer("Do not meddle in the affairs of wizards, for they are subtle and quick to anger.") @@ -60,7 +68,7 @@ The tokenizer returns a dictionary with three important items: * [attention_mask](glossary#attention-mask) indicates whether a token should be attended to or not. * [token_type_ids](glossary#token-type-ids) identifies which sequence a token belongs to when there is more than one sequence. -You can decode the `input_ids` to return the original input: +Return your input by decoding the `input_ids`: ```py >>> tokenizer.decode(encoded_input["input_ids"]) @@ -68,9 +76,9 @@ You can decode the `input_ids` to return the original input: ``` As you can see, the tokenizer added two special tokens - `CLS` and `SEP` (classifier and separator) - to the sentence. Not all models need -special tokens, but if they do, the tokenizer will automatically add them for you. +special tokens, but if they do, the tokenizer automatically adds them for you. -If there are several sentences you want to process, pass the sentences as a list to the tokenizer: +If there are several sentences you want to preprocess, pass them as a list to the tokenizer: ```py >>> batch_sentences = [ @@ -93,7 +101,7 @@ If there are several sentences you want to process, pass the sentences as a list ### Pad -This brings us to an important topic. When you process a batch of sentences, they aren't always the same length. This is a problem because tensors, the input to the model, need to have a uniform shape. Padding is a strategy for ensuring tensors are rectangular by adding a special *padding token* to sentences with fewer tokens. +Sentences aren't always the same length which can be an issue because tensors, the model inputs, need to have a uniform shape. Padding is a strategy for ensuring tensors are rectangular by adding a special *padding token* to shorter sentences. Set the `padding` parameter to `True` to pad the shorter sequences in the batch to match the longest sequence: @@ -116,11 +124,11 @@ Set the `padding` parameter to `True` to pad the shorter sequences in the batch [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]]} ``` -Notice the tokenizer padded the first and third sentences with a `0` because they are shorter! +The first and third sentences are now padded with `0`'s because they are shorter. ### Truncation -On the other end of the spectrum, sometimes a sequence may be too long for a model to handle. In this case, you will need to truncate the sequence to a shorter length. +On the other end of the spectrum, sometimes a sequence may be too long for a model to handle. In this case, you'll need to truncate the sequence to a shorter length. Set the `truncation` parameter to `True` to truncate a sequence to the maximum length accepted by the model: @@ -143,9 +151,15 @@ Set the `truncation` parameter to `True` to truncate a sequence to the maximum l [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]]} ``` + + +Check out the [Padding and truncation](./pad_truncation) concept guide to learn more different padding and truncation arguments. + + + ### Build tensors -Finally, you want the tokenizer to return the actual tensors that are fed to the model. +Finally, you want the tokenizer to return the actual tensors that get fed to the model. Set the `return_tensors` parameter to either `pt` for PyTorch, or `tf` for TensorFlow: @@ -199,13 +213,9 @@ array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0], ## Audio -Audio inputs are preprocessed differently than textual inputs, but the end goal remains the same: create numerical sequences the model can understand. A [feature extractor](main_classes/feature_extractor) is designed for the express purpose of extracting features from raw image or audio data and converting them into tensors. Before you begin, install 🤗 Datasets to load an audio dataset to experiment with: +For audio tasks, you'll need a [feature extractor](main_classes/feature_extractor) to prepare your dataset for the model. The feature extractor is designed to extract features from raw audio data, and convert them into tensors. -```bash -pip install datasets -``` - -Load the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset (see the 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub.html) for more details on how to load a dataset): +Load the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset (see the 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub.html) for more details on how to load a dataset) to see how you can use a feature extractor with audio datasets: ```py >>> from datasets import load_dataset, Audio @@ -213,7 +223,7 @@ Load the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset (see >>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") ``` -Access the first element of the `audio` column to take a look at the input. Calling the `audio` column will automatically load and resample the audio file: +Access the first element of the `audio` column to take a look at the input. Calling the `audio` column automatically loads and resamples the audio file: ```py >>> dataset[0]["audio"] @@ -229,20 +239,7 @@ This returns three items: * `path` points to the location of the audio file. * `sampling_rate` refers to how many data points in the speech signal are measured per second. -### Resample - -For this tutorial, you will use the [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base) model. As you can see from the model card, the Wav2Vec2 model is pretrained on 16kHz sampled speech audio. It is important your audio data's sampling rate matches the sampling rate of the dataset used to pretrain the model. If your data's sampling rate isn't the same, then you need to resample your audio data. - -For example, the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset has a sampling rate of 8000kHz. In order to use the Wav2Vec2 model with this dataset, upsample the sampling rate to 16kHz: - -```py ->>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") ->>> dataset[0]["audio"] -{'array': array([ 0. , 0.00024414, -0.00024414, ..., -0.00024414, - 0. , 0. ], dtype=float32), - 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', - 'sampling_rate': 8000} -``` +For this tutorial, you'll use the [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base) model. Take a look at the model card, and you'll learn Wav2Vec2 is pretrained on 16kHz sampled speech audio. It is important your audio data's sampling rate matches the sampling rate of the dataset used to pretrain the model. If your data's sampling rate isn't the same, then you need to resample your data. 1. Use 🤗 Datasets' [`~datasets.Dataset.cast_column`] method to upsample the sampling rate to 16kHz: @@ -250,7 +247,7 @@ For example, the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) data >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000)) ``` -2. Load the audio file: +2. Call the `audio` column again to resample the audio file: ```py >>> dataset[0]["audio"] @@ -260,11 +257,7 @@ For example, the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) data 'sampling_rate': 16000} ``` -As you can see, the `sampling_rate` is now 16kHz! - -### Feature extractor - -The next step is to load a feature extractor to normalize and pad the input. When padding textual data, a `0` is added for shorter sequences. The same idea applies to audio data, and the audio feature extractor will add a `0` - interpreted as silence - to `array`. +Next, load a feature extractor to normalize and pad the input. When padding textual data, a `0` is added for shorter sequences. The same idea applies to audio data. The feature extractor adds a `0` - interpreted as silence - to `array`. Load the feature extractor with [`AutoFeatureExtractor.from_pretrained`]: @@ -283,8 +276,6 @@ Pass the audio `array` to the feature extractor. We also recommend adding the `s 5.6335266e-04, 4.6588284e-06, -1.7142107e-04], dtype=float32)]} ``` -### Pad and truncate - Just like the tokenizer, you can apply padding or truncation to handle variable sequences in a batch. Take a look at the sequence length of these two audio samples: ```py @@ -295,7 +286,7 @@ Just like the tokenizer, you can apply padding or truncation to handle variable (106496,) ``` -As you can see, the first sample has a longer sequence than the second sample. Let's create a function that will preprocess the dataset. Specify a maximum sample length, and the feature extractor will either pad or truncate the sequences to match it: +Create a function to preprocess the dataset so the audio samples are the same lengths. Specify a maximum sample length, and the feature extractor will either pad or truncate the sequences to match it: ```py >>> def preprocess_function(examples): @@ -310,13 +301,13 @@ As you can see, the first sample has a longer sequence than the second sample. L ... return inputs ``` -Apply the function to the the first few examples in the dataset: +Apply the `preprocess_function` to the the first few examples in the dataset: ```py >>> processed_dataset = preprocess_function(dataset[:5]) ``` -Now take another look at the processed sample lengths: +The sample lengths are now the same and match the specified maximum length. You can pass your processed dataset to the model now! ```py >>> processed_dataset["input_values"][0].shape @@ -326,13 +317,17 @@ Now take another look at the processed sample lengths: (100000,) ``` -The lengths of the first two samples now match the maximum length you specified. +## Computer vision + +For computer vision tasks, you'll need a [feature extractor](main_classes/feature_extractor) to prepare your dataset for the model. The feature extractor is designed to extract features from images, and convert them into tensors. -## Vision +Load the [food101](https://huggingface.co/datasets/food101) dataset (see the 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub.html) for more details on how to load a dataset) to see how you can use a feature extractor with computer vision datasets: -A feature extractor is also used to process images for vision tasks. Once again, the goal is to convert the raw image into a batch of tensors as input. + + +Use 🤗 Datasets `split` parameter to only load a small sample from the training split since the dataset is quite large! -Let's load the [food101](https://huggingface.co/datasets/food101) dataset for this tutorial. Use 🤗 Datasets `split` parameter to only load a small sample from the training split since the dataset is quite large: + ```py >>> from datasets import load_dataset @@ -346,9 +341,9 @@ Next, take a look at the image with 🤗 Datasets [`Image`](https://huggingface. >>> dataset[0]["image"] ``` -![vision-preprocess-tutorial.png](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/vision-preprocess-tutorial.png) - -### Feature extractor +
+ +
Load the feature extractor with [`AutoFeatureExtractor.from_pretrained`]: @@ -358,11 +353,9 @@ Load the feature extractor with [`AutoFeatureExtractor.from_pretrained`]: >>> feature_extractor = AutoFeatureExtractor.from_pretrained("google/vit-base-patch16-224") ``` -### Data augmentation - -For vision tasks, it is common to add some type of data augmentation to the images as a part of preprocessing. You can add augmentations with any library you'd like, but in this tutorial, you will use torchvision's [`transforms`](https://pytorch.org/vision/stable/transforms.html) module. +For computer vision tasks, it is common to add some type of data augmentation to the images as a part of preprocessing. You can add augmentations with any library you'd like, but in this tutorial, you'll use torchvision's [`transforms`](https://pytorch.org/vision/stable/transforms.html) module. If you're interested in using another data augmentation library, learn how in the [Albumentations](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification_albumentations.ipynb) or [Kornia notebooks](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification_kornia.ipynb). -1. Normalize the image and use [`Compose`](https://pytorch.org/vision/master/generated/torchvision.transforms.Compose.html) to chain some transforms - [`RandomResizedCrop`](https://pytorch.org/vision/main/generated/torchvision.transforms.RandomResizedCrop.html) and [`ColorJitter`](https://pytorch.org/vision/main/generated/torchvision.transforms.ColorJitter.html) - together: +1. Normalize the image with the feature extractor and use [`Compose`](https://pytorch.org/vision/master/generated/torchvision.transforms.Compose.html) to chain some transforms - [`RandomResizedCrop`](https://pytorch.org/vision/main/generated/torchvision.transforms.RandomResizedCrop.html) and [`ColorJitter`](https://pytorch.org/vision/main/generated/torchvision.transforms.ColorJitter.html) - together: ```py >>> from torchvision.transforms import Compose, Normalize, RandomResizedCrop, ColorJitter, ToTensor @@ -373,7 +366,7 @@ For vision tasks, it is common to add some type of data augmentation to the imag ... ) ``` -2. The model accepts [`pixel_values`](model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderModel.forward.pixel_values) as it's input. This value is generated by the feature extractor. Create a function that generates `pixel_values` from the transforms: +2. The model accepts [`pixel_values`](model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderModel.forward.pixel_values) as its input, which is generated by the feature extractor. Create a function that generates `pixel_values` from the transforms: ```py >>> def transforms(examples): @@ -381,13 +374,13 @@ For vision tasks, it is common to add some type of data augmentation to the imag ... return examples ``` -3. Then use 🤗 Datasets [`set_transform`](https://huggingface.co/docs/datasets/process.html#format-transform) to apply the transforms on-the-fly: +3. Then use 🤗 Datasets [`set_transform`](https://huggingface.co/docs/datasets/process.html#format-transform) to apply the transforms on the fly: ```py >>> dataset.set_transform(transforms) ``` -4. Now when you access the image, you will notice the feature extractor has added the model input `pixel_values`: +4. Now when you access the image, you'll notice the feature extractor has added `pixel_values`. You can pass your processed dataset to the model now! ```py >>> dataset[0]["image"] @@ -418,7 +411,7 @@ For vision tasks, it is common to add some type of data augmentation to the imag [-0.1922, -0.1922, -0.1922, ..., -0.2941, -0.2863, -0.3412]]])} ``` -Here is what the image looks like after you preprocess it. Just as you'd expect from the applied transforms, the image has been randomly cropped and it's color properties are different. +Here is what the image looks like after the transforms are applied. The image has been randomly cropped and it's color properties are different. ```py >>> import numpy as np @@ -428,16 +421,15 @@ Here is what the image looks like after you preprocess it. Just as you'd expect >>> plt.imshow(img.permute(1, 2, 0)) ``` -![preprocessed_image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/preprocessed_image.png) +
+ +
## Multimodal -For multimodal tasks. you will use a combination of everything you've learned so far and apply your skills to a automatic speech recognition (ASR) task. This means you will need a: - -* Feature extractor to preprocess the audio data. -* Tokenizer to process the text. +For tasks involving multimodal inputs, you'll need a [processor](main_classes/processors) to prepare your dataset for the model. A processor couples a tokenizer and feature extractor. -Let's return to the [LJ Speech](https://huggingface.co/datasets/lj_speech) dataset: +Load the [LJ Speech](https://huggingface.co/datasets/lj_speech) dataset (see the 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub.html) for more details on how to load a dataset) to see how you can use a processor for automatic speech recognition (ASR): ```py >>> from datasets import load_dataset @@ -445,7 +437,7 @@ Let's return to the [LJ Speech](https://huggingface.co/datasets/lj_speech) datas >>> lj_speech = load_dataset("lj_speech", split="train") ``` -Since you are mainly interested in the `audio` and `text` column, remove the other columns: +For ASR, you're mainly focused on `audio` and `text` so you can remove the other columns: ```py >>> lj_speech = lj_speech.map(remove_columns=["file", "id", "normalized_text"]) @@ -464,15 +456,13 @@ Now take a look at the `audio` and `text` columns: 'Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition' ``` -Remember from the earlier section on processing audio data, you should always [resample](preprocessing#audio) your audio data's sampling rate to match the sampling rate of the dataset used to pretrain a model: +Remember you should always [resample](preprocessing#audio) your audio dataset's sampling rate to match the sampling rate of the dataset used to pretrain a model! ```py >>> lj_speech = lj_speech.cast_column("audio", Audio(sampling_rate=16_000)) ``` -### Processor - -A processor combines a feature extractor and tokenizer. Load a processor with [`AutoProcessor.from_pretrained]: +Load a processor with [`AutoProcessor.from_pretrained`]: ```py >>> from transformers import AutoProcessor @@ -480,7 +470,7 @@ A processor combines a feature extractor and tokenizer. Load a processor with [` >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h") ``` -1. Create a function to process the audio data to `input_values`, and tokenizes the text to `labels`. These are your inputs to the model: +1. Create a function to process the audio data contained in `array` to `input_values`, and tokenize `text` to `labels`. These are the inputs to the model: ```py >>> def prepare_dataset(example): @@ -497,6 +487,4 @@ A processor combines a feature extractor and tokenizer. Load a processor with [` >>> prepare_dataset(lj_speech[0]) ``` -Notice the processor has added `input_values` and `labels`. The sampling rate has also been correctly downsampled to 16kHz. - -Awesome, you should now be able to preprocess data for any modality and even combine different modalities! In the next tutorial, learn how to fine-tune a model on your newly preprocessed data. +The processor has now added `input_values` and `labels`, and the sampling rate has also been correctly downsampled to 16kHz. You can pass your processed dataset to the model now! \ No newline at end of file From 99c32493e025ffd2dbbd104663f6d83481bc70ce Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Thu, 29 Sep 2022 08:36:46 +0200 Subject: [PATCH 409/539] Fix confusing working directory in Push CI (#19234) Co-authored-by: ydshieh --- .github/workflows/self-push.yml | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/.github/workflows/self-push.yml b/.github/workflows/self-push.yml index af00314fa93fce..b09e7046c84dd9 100644 --- a/.github/workflows/self-push.yml +++ b/.github/workflows/self-push.yml @@ -96,11 +96,6 @@ jobs: echo "env.CI_BRANCH = ${{ env.CI_BRANCH }}" echo "env.CI_SHA = ${{ env.CI_SHA }}" - - name: Checkout transformers - uses: actions/checkout@v2 - with: - fetch-depth: 2 - - name: Update clone using environment variables working-directory: /transformers run: | @@ -111,12 +106,14 @@ jobs: echo "log = $(git log -n 1)" - name: Cleanup + working-directory: /transformers run: | rm -rf tests/__pycache__ rm -rf tests/models/__pycache__ rm -rf reports - name: Fetch the tests to run + working-directory: /transformers # TODO: add `git-python` in the docker images run: | pip install --upgrade git-python @@ -126,10 +123,11 @@ jobs: uses: actions/upload-artifact@v2 with: name: test_fetched - path: test_preparation.txt + path: /transformers/test_preparation.txt - id: set-matrix name: Organize tests into models + working-directory: /transformers # The `keys` is used as GitHub actions matrix for jobs, i.e. `models/bert`, `tokenization`, `pipeline`, etc. # The `test_map` is used to get the actual identified test files under each key. # If no test to run (so no `test_map.json` file), create a dummy map (empty matrix will fail) From 9d732fd2dd99cd5c353a6e50c2fc5059d99e1172 Mon Sep 17 00:00:00 2001 From: Gabriele Sarti Date: Thu, 29 Sep 2022 04:42:07 -0400 Subject: [PATCH 410/539] XGLM - Fix Softmax NaNs when using FP16 (#18057) * fix fp16 for xglm * Removed misleading comment * Fix undefined variable Co-authored-by: Gabriele Sarti Co-authored-by: ydshieh Co-authored-by: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> --- src/transformers/models/xglm/modeling_xglm.py | 8 +++++-- tests/models/xglm/test_modeling_xglm.py | 21 ++++++++++++++++++- 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/src/transformers/models/xglm/modeling_xglm.py b/src/transformers/models/xglm/modeling_xglm.py index 6717d8d8e1528d..15c1f53f3008c7 100755 --- a/src/transformers/models/xglm/modeling_xglm.py +++ b/src/transformers/models/xglm/modeling_xglm.py @@ -235,7 +235,6 @@ def create_position_ids_from_inputs_embeds(self, inputs_embeds, past_key_values_ return position_ids.unsqueeze(0).expand(input_shape).contiguous() + past_key_values_length -# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->XGLM class XGLMAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" @@ -338,9 +337,14 @@ def forward( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask + attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min)) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) - attn_weights = nn.functional.softmax(attn_weights, dim=-1) + # upcast to fp32 if the weights are in fp16. Please see https://github.com/huggingface/transformers/pull/17437 + if attn_weights.dtype == torch.float16: + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(torch.float16) + else: + attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): diff --git a/tests/models/xglm/test_modeling_xglm.py b/tests/models/xglm/test_modeling_xglm.py index 6d40ddab8eb2cc..4a9a5ce2144825 100644 --- a/tests/models/xglm/test_modeling_xglm.py +++ b/tests/models/xglm/test_modeling_xglm.py @@ -18,7 +18,7 @@ import unittest from transformers import XGLMConfig, is_torch_available -from transformers.testing_utils import require_torch, slow, torch_device +from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_generation_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester @@ -468,3 +468,22 @@ def test_xglm_sample_max_time(self): model.generate(input_ids, do_sample=False, max_time=None, max_length=256) duration = datetime.datetime.now() - start self.assertGreater(duration, datetime.timedelta(seconds=1.25 * MAX_TIME)) + + @require_torch_gpu + def test_batched_nan_fp16(self): + model_name = "facebook/xglm-564M" + tokenizer = XGLMTokenizer.from_pretrained(model_name, use_fast=False, padding_side="left") + + model = XGLMForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, use_cache=True).cuda() + model = model.eval() + + batch = tokenizer(["Who are you?", "Joe Biden is the president of"], padding=True, return_tensors="pt") + + input_ids = batch["input_ids"].cuda() + attention_mask = batch["attention_mask"].cuda() + + with torch.no_grad(): + outputs = model(input_ids, attention_mask=attention_mask) + self.assertFalse( + torch.isnan(outputs.logits[0]).any().item() + ) # the first logits could contain NaNs if it fails From bb6fa06f2d4e078837a982f522d6f6f62ba83f11 Mon Sep 17 00:00:00 2001 From: Michael Benayoun Date: Thu, 29 Sep 2022 11:04:49 +0200 Subject: [PATCH 411/539] Add a getattr method, which replaces _module_getattr in torch.fx.Tracer from PyTorch 1.13+ (#19233) --- src/transformers/utils/fx.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/transformers/utils/fx.py b/src/transformers/utils/fx.py index d3255baf847061..9fd66df175f9d8 100644 --- a/src/transformers/utils/fx.py +++ b/src/transformers/utils/fx.py @@ -862,11 +862,12 @@ def create_proxy(self, kind, target, args, kwargs, name=None, type_expr=None, pr return rv + # Replaced by .getattr from PyTorch 1.13 def _module_getattr(self, attr, attr_val, parameter_proxy_cache): if getattr(self, "_disable_module_getattr", False): return attr_val else: - # return super()._module_getattr(attr, attr_val, parameter_proxy_cache) + def maybe_get_proxy_for_attr(attr_val, collection_to_search, parameter_proxy_cache): for n, p in collection_to_search: if attr_val is p: @@ -899,6 +900,10 @@ def maybe_get_proxy_for_attr(attr_val, collection_to_search, parameter_proxy_cac return attr_val + # Needed for PyTorch 1.13+ + def getattr(self, attr: str, attr_val: Any, parameter_proxy_cache: Dict[str, Any]): + return self._module_getattr(attr, attr_val, parameter_proxy_cache) + def call_module(self, m, forward, args, kwargs): self.orig_forward = forward return super().call_module(m, forward, args, kwargs) From 0dc7b3a7858b0359c4606f8a60d5da02e24b5524 Mon Sep 17 00:00:00 2001 From: Aritra Roy Gosthipaty Date: Thu, 29 Sep 2022 15:18:04 +0530 Subject: [PATCH 412/539] [TensorFlow] Adding GroupViT (#18020) * chore: initial commit * chore: adding util methods yet to work on the nn.functional.interpolate port with align_corener=True * chore: refactor the utils * used tf.compat.v1.image.resize to align the F.interpolate function * added type hints to the method signatures * added references to the gists where one 2 one alignment of torch and tf has been shown * chore: adding the layers * chore: porting all the layers from torch to tf This is the initial draft, nothing is tested yet. * chore: aligning the layers with reference to tf clip * chore: aligning the modules * added demaraction comments * added copied and adapted from comments * chore: aligning with CLIP * chore: wrangling the layers to keep it tf compatible * chore: aligning the names of the layers for porting * chore: style changes * chore: adding docs and inits * chore: adding tfp dependencis the code is taken from TAPAS * chore: initial commit for testing * chore: aligning the vision embeddings with the vit implementatino * chore: changing model prefix * chore: fixing the name of the model and the layer normalization test case * chore: every test passes but the slow ones * chore: fix style and integration test * chore: moving comments below decorators * chore: make fixup and fix-copies changes * chore: adding the Vision and Text Model to check_repo * chore: modifying the prefix name to align it with the torch implementation * chore: fix typo in configuration * choer: changing the name of the model variable * chore: adding segmentation flag * chore: gante's review * chore: style refactor * chore: amy review * chore: adding shape_list to parts that have been copied from other snippets * chore: init batchnorm with torch defaults * chore: adding shape_list to pass the tests * test fix: adding seed as 0 * set seed * chore: changing the straight through trick to fix -ve dimensinos * chore: adding a dimension to the loss * chore: adding reviewers and contributors names to the docs * chore: added changes after review * chore: code quality fixup * chore: fixing the segmentation snippet * chore: adding to the layer calls * chore: changing int32 to int64 for inputs of serving * chore: review changes * chore: style changes * chore: remove from_pt=True * fix: repo consistency Co-authored-by: ydshieh --- docs/source/en/index.mdx | 2 +- docs/source/en/model_doc/groupvit.mdx | 19 +- src/transformers/__init__.py | 16 + .../models/auto/modeling_tf_auto.py | 1 + src/transformers/models/groupvit/__init__.py | 30 +- .../models/groupvit/configuration_groupvit.py | 2 +- .../models/groupvit/modeling_groupvit.py | 2 +- .../models/groupvit/modeling_tf_groupvit.py | 1993 +++++++++++++++++ src/transformers/utils/dummy_tf_objects.py | 31 + .../models/groupvit/test_modeling_groupvit.py | 33 +- .../groupvit/test_modeling_tf_groupvit.py | 715 ++++++ tests/test_modeling_tf_common.py | 2 +- utils/check_repo.py | 2 + utils/documentation_tests.txt | 2 + 14 files changed, 2841 insertions(+), 9 deletions(-) create mode 100644 src/transformers/models/groupvit/modeling_tf_groupvit.py create mode 100644 tests/models/groupvit/test_modeling_tf_groupvit.py diff --git a/docs/source/en/index.mdx b/docs/source/en/index.mdx index e6a3d912b27437..98a458e11fff41 100644 --- a/docs/source/en/index.mdx +++ b/docs/source/en/index.mdx @@ -248,7 +248,7 @@ Flax), PyTorch, and/or TensorFlow. | GPT NeoX | ❌ | ✅ | ✅ | ❌ | ❌ | | GPT NeoX Japanese | ✅ | ❌ | ✅ | ❌ | ❌ | | GPT-J | ❌ | ❌ | ✅ | ✅ | ✅ | -| GroupViT | ❌ | ❌ | ✅ | ❌ | ❌ | +| GroupViT | ❌ | ❌ | ✅ | ✅ | ❌ | | Hubert | ❌ | ❌ | ✅ | ✅ | ❌ | | I-BERT | ❌ | ❌ | ✅ | ❌ | ❌ | | ImageGPT | ❌ | ❌ | ✅ | ❌ | ❌ | diff --git a/docs/source/en/model_doc/groupvit.mdx b/docs/source/en/model_doc/groupvit.mdx index bad55ea28c6335..8c955a2e30f7fc 100644 --- a/docs/source/en/model_doc/groupvit.mdx +++ b/docs/source/en/model_doc/groupvit.mdx @@ -26,7 +26,7 @@ Tips: - You may specify `output_segmentation=True` in the forward of `GroupViTModel` to get the segmentation logits of input texts. - The quickest way to get started with GroupViT is by checking the [example notebooks](https://github.com/xvjiarui/GroupViT/blob/main/demo/GroupViT_hf_inference_notebook.ipynb) (which showcase zero-shot segmentation inference). One can also check out the [HuggingFace Spaces demo](https://huggingface.co/spaces/xvjiarui/GroupViT) to play with GroupViT. -This model was contributed by [xvjiarui](https://huggingface.co/xvjiarui). +This model was contributed by [xvjiarui](https://huggingface.co/xvjiarui). The TensorFlow version was contributed by [ariG23498](https://huggingface.co/ariG23498) with the help of [Yih-Dar SHIEH](https://huggingface.co/ydshieh), [Amy Roberts](https://huggingface.co/amyeroberts), and [Joao Gante](https://huggingface.co/joaogante). The original code can be found [here](https://github.com/NVlabs/GroupViT). @@ -59,3 +59,20 @@ The original code can be found [here](https://github.com/NVlabs/GroupViT). [[autodoc]] GroupViTVisionModel - forward + +## TFGroupViTModel + +[[autodoc]] TFGroupViTModel + - call + - get_text_features + - get_image_features + +## TFGroupViTTextModel + +[[autodoc]] TFGroupViTTextModel + - call + +## TFGroupViTVisionModel + +[[autodoc]] TFGroupViTVisionModel + - call \ No newline at end of file diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 50fb4d2c0b8a7e..fb09d0af9f261d 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -2417,6 +2417,15 @@ "TFGPTJPreTrainedModel", ] ) + _import_structure["models.groupvit"].extend( + [ + "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFGroupViTModel", + "TFGroupViTPreTrainedModel", + "TFGroupViTTextModel", + "TFGroupViTVisionModel", + ] + ) _import_structure["models.hubert"].extend( [ "TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST", @@ -4986,6 +4995,13 @@ TFGPTJModel, TFGPTJPreTrainedModel, ) + from .models.groupvit import ( + TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFGroupViTModel, + TFGroupViTPreTrainedModel, + TFGroupViTTextModel, + TFGroupViTVisionModel, + ) from .models.hubert import ( TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFHubertForCTC, diff --git a/src/transformers/models/auto/modeling_tf_auto.py b/src/transformers/models/auto/modeling_tf_auto.py index 77ab03d38aeb1c..e13a0754b69261 100644 --- a/src/transformers/models/auto/modeling_tf_auto.py +++ b/src/transformers/models/auto/modeling_tf_auto.py @@ -50,6 +50,7 @@ ("funnel", ("TFFunnelModel", "TFFunnelBaseModel")), ("gpt2", "TFGPT2Model"), ("gptj", "TFGPTJModel"), + ("groupvit", "TFGroupViTModel"), ("hubert", "TFHubertModel"), ("layoutlm", "TFLayoutLMModel"), ("layoutlmv3", "TFLayoutLMv3Model"), diff --git a/src/transformers/models/groupvit/__init__.py b/src/transformers/models/groupvit/__init__.py index 3985e9ecff5d73..0e8b51fedbd19d 100644 --- a/src/transformers/models/groupvit/__init__.py +++ b/src/transformers/models/groupvit/__init__.py @@ -17,7 +17,7 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _import_structure = { @@ -44,6 +44,20 @@ "GroupViTVisionModel", ] +try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_tf_groupvit"] = [ + "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFGroupViTModel", + "TFGroupViTPreTrainedModel", + "TFGroupViTTextModel", + "TFGroupViTVisionModel", + ] + if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, @@ -67,6 +81,20 @@ GroupViTVisionModel, ) + try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_tf_groupvit import ( + TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFGroupViTModel, + TFGroupViTPreTrainedModel, + TFGroupViTTextModel, + TFGroupViTVisionModel, + ) + else: import sys diff --git a/src/transformers/models/groupvit/configuration_groupvit.py b/src/transformers/models/groupvit/configuration_groupvit.py index 895c0608b730f9..ea428224105133 100644 --- a/src/transformers/models/groupvit/configuration_groupvit.py +++ b/src/transformers/models/groupvit/configuration_groupvit.py @@ -162,7 +162,7 @@ class GroupViTVisionConfig(PretrainedConfig): The number of layers in each encoder block. num_group_tokens (`List[int]`, *optional*, defaults to [64, 8, 0]): The number of group tokens for each stage. - num_output_groups (`List[int]`, *optional*, defaults to [64, 8, 0]): + num_output_groups (`List[int]`, *optional*, defaults to [64, 8, 8]): The number of output groups for each stage, 0 means no group. num_attention_heads (`int`, *optional*, defaults to 6): Number of attention heads for each attention layer in the Transformer encoder. diff --git a/src/transformers/models/groupvit/modeling_groupvit.py b/src/transformers/models/groupvit/modeling_groupvit.py index 3d2f78e3cfd3b2..210a848f28c73d 100644 --- a/src/transformers/models/groupvit/modeling_groupvit.py +++ b/src/transformers/models/groupvit/modeling_groupvit.py @@ -1300,7 +1300,7 @@ def forward( >>> import requests >>> from transformers import AutoProcessor, GroupViTVisionModel - >>> processor = AutoPProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc") + >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> model = GroupViTVisionModel.from_pretrained("nvidia/groupvit-gcc-yfcc") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" diff --git a/src/transformers/models/groupvit/modeling_tf_groupvit.py b/src/transformers/models/groupvit/modeling_tf_groupvit.py new file mode 100644 index 00000000000000..481f065eb6f6ac --- /dev/null +++ b/src/transformers/models/groupvit/modeling_tf_groupvit.py @@ -0,0 +1,1993 @@ +# coding=utf-8 +# Copyright 2022 NVIDIA and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" TF 2.0 GroupViT model.""" + + +import collections.abc +import math +from dataclasses import dataclass +from typing import Any, Dict, Optional, Tuple, Union + +import numpy as np +import tensorflow as tf + +from ...activations_tf import get_tf_activation +from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling +from ...modeling_tf_utils import ( + DUMMY_INPUTS, + TFModelInputType, + TFPreTrainedModel, + get_initializer, + keras_serializable, + unpack_inputs, +) +from ...tf_utils import shape_list, stable_softmax +from ...utils import ( + ModelOutput, + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_tensorflow_probability_available, + logging, + replace_return_docstrings, +) +from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig + + +logger = logging.get_logger(__name__) + +# soft dependency +if is_tensorflow_probability_available(): + try: + import tensorflow_probability as tfp + + # On the first call, check whether a compatible version of TensorFlow is installed + # TensorFlow Probability depends on a recent stable release of TensorFlow + _ = tfp.distributions.Normal(loc=0.0, scale=1.0) + except ImportError: + logger.error( + "GroupViT models are not usable since `tensorflow_probability` can't be loaded." + "It seems you have `tensorflow_probability` installed with the wrong tensorflow version." + "Please try to reinstall it following the instructions here: https://github.com/tensorflow/probability." + ) + +_CHECKPOINT_FOR_DOC = "nvidia/groupvit-gcc-yfcc" + +TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "nvidia/groupvit-gcc-yfcc", + # See all GroupViT models at https://huggingface.co/models?filter=groupvit +] + + +LARGE_NEGATIVE = -1e8 + + +# Copied from transformers.models.bart.modeling_tf_bart._expand_mask +def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. + """ + src_len = shape_list(mask)[1] + tgt_len = tgt_len if tgt_len is not None else src_len + one_cst = tf.constant(1.0) + mask = tf.cast(mask, dtype=one_cst.dtype) + expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) + + return (one_cst - expanded_mask) * LARGE_NEGATIVE + + +# contrastive loss function, adapted from +# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html +def contrastive_loss(logits: tf.Tensor) -> tf.Tensor: + return tf.math.reduce_mean( + tf.keras.metrics.sparse_categorical_crossentropy( + y_true=tf.range(shape_list(logits)[0]), y_pred=logits, from_logits=True + ) + ) + + +# Copied from transformers.models.clip.modeling_tf_clip.clip_loss with clip->groupvit +def groupvit_loss(similarity: tf.Tensor) -> tf.Tensor: + caption_loss = contrastive_loss(similarity) + image_loss = contrastive_loss(tf.transpose(similarity)) + return (caption_loss + image_loss) / 2.0 + + +def hard_softmax(logits: tf.Tensor, dim: int) -> tf.Tensor: + y_soft = stable_softmax(logits, dim) + # Straight through. + index = tf.argmax(y_soft, dim) + y_hard = tf.one_hot( + index, + depth=shape_list(logits)[dim], + # TensorFlow expects axis to be -1 or between [0, 3). But received: -2 + # This is why the following code snippet is used. + axis=range(len(shape_list(logits)))[dim], + dtype=y_soft.dtype, + ) + ret = y_hard - tf.stop_gradient(y_soft) + y_soft + + return ret + + +def gumbel_softmax(logits: tf.Tensor, tau: float = 1, hard: bool = False, dim: int = -1) -> tf.Tensor: + gumbel_dist = tfp.distributions.Gumbel(0.0, 1.0) + gumbels = gumbel_dist.sample(tf.shape(logits), dtype=logits.dtype) + + gumbels = (logits + gumbels) / tau # ~Gumbel(logits,tau) + y_soft = stable_softmax(gumbels, dim) + + if hard: + # Straight through. + index = tf.argmax(y_soft, dim) + y_hard = tf.one_hot( + index, + depth=shape_list(logits)[dim], + # TensorFlow expects axis to be -1 or between [0, 3). But received: -2 + # This is why the following code snippet is used. + axis=range(len(shape_list(logits)))[dim], + dtype=y_soft.dtype, + ) + ret = y_hard - tf.stop_gradient(y_soft) + y_soft + else: + # Reparametrization trick. + ret = y_soft + return ret + + +def resize_attention_map(attentions: tf.Tensor, height: int, width: int, align_corners: bool = False) -> tf.Tensor: + """ + Args: + attentions (`tf.Tensor`): attention map of shape [batch_size, groups, feat_height*feat_width] + height (`int`): height of the output attention map + width (`int`): width of the output attention map + align_corners (`bool`, *optional*): the `align_corner` argument for `nn.functional.interpolate`. + + Returns: + `tf.Tensor`: resized attention map of shape [batch_size, groups, height, width] + """ + + scale = (height * width // attentions.shape[2]) ** 0.5 + if height > width: + feat_width = int(np.round(width / scale)) + feat_height = shape_list(attentions)[2] // feat_width + else: + feat_height = int(np.round(height / scale)) + feat_width = shape_list(attentions)[2] // feat_height + + batch_size = shape_list(attentions)[0] + groups = shape_list(attentions)[1] # number of group token + # [batch_size, groups, height x width, groups] -> [batch_size, groups, height, width] + attentions = tf.reshape(attentions, (batch_size, groups, feat_height, feat_width)) + attentions = tf.transpose(attentions, perm=(0, 2, 3, 1)) + if align_corners: + attentions = tf.compat.v1.image.resize( + attentions, + size=(height, width), + method="bilinear", + align_corners=align_corners, + ) + else: + attentions = tf.image.resize(attentions, size=(height, width), method="bilinear") + attentions = tf.transpose(attentions, perm=(0, 3, 1, 2)) + return attentions + + +def get_grouping_from_attentions(attentions: Tuple[tf.Tensor], hw_shape: Tuple[int]) -> tf.Tensor: + """ + Args: + attentions (`tuple(tf.Tensor)`: tuple of attention maps returned by `TFGroupViTVisionTransformer` + hw_shape (`tuple(int)`): height and width of the output attention map + Returns: + `tf.Tensor`: the attention map of shape [batch_size, groups, height, width] + """ + + attn_maps = [] + prev_attn_masks = None + for attn_masks in attentions: + # [batch_size, num_groups, height x width] -> [batch_size, height x width, num_groups] + attn_masks = tf.transpose(attn_masks, perm=(0, 2, 1)) + if prev_attn_masks is None: + prev_attn_masks = attn_masks + else: + prev_attn_masks = tf.matmul(prev_attn_masks, attn_masks) + # [batch_size, height x width, num_groups] -> [batch_size, num_groups, height x width] -> [batch_size, num_groups, height, width] + cur_attn_map = resize_attention_map(tf.transpose(prev_attn_masks, perm=(0, 2, 1)), *hw_shape) + attn_maps.append(cur_attn_map) + + # [batch_size, num_groups, height, width] + final_grouping = attn_maps[-1] + + return tf.stop_gradient(final_grouping) + + +@dataclass +class TFGroupViTModelOutput(ModelOutput): + """ + Args: + loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): + Contrastive loss for image-text similarity. + logits_per_image (`tf.Tensor` of shape `(image_batch_size, text_batch_size)`): + The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text + similarity scores. + logits_per_text (`tf.Tensor` of shape `(text_batch_size, image_batch_size)`): + The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image + similarity scores. + segmentation_logits (`tf.Tensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`): + Classification scores for each pixel. + + + + The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is + to avoid doing two interpolations and lose some quality when a user needs to resize the logits to the + original image size as post-processing. You should always check your logits shape and resize as needed. + + + + text_embeds (`tf.Tensor` of shape `(batch_size, output_dim`): + The text embeddings obtained by applying the projection layer to the pooled output of + [`TFGroupViTTextModel`]. + image_embeds (`tf.Tensor` of shape `(batch_size, output_dim`): + The image embeddings obtained by applying the projection layer to the pooled output of + [`TFGroupViTVisionModel`]. + text_model_output (`TFBaseModelOutputWithPooling`): + The output of the [`TFGroupViTTextModel`]. + vision_model_output (`TFBaseModelOutputWithPooling`): + The output of the [`TFGroupViTVisionModel`]. + """ + + loss: Optional[tf.Tensor] = None + logits_per_image: tf.Tensor = None + logits_per_text: tf.Tensor = None + segmentation_logits: tf.Tensor = None + text_embeds: tf.Tensor = None + image_embeds: tf.Tensor = None + text_model_output: TFBaseModelOutputWithPooling = None + vision_model_output: TFBaseModelOutputWithPooling = None + + def to_tuple(self) -> Tuple[Any]: + return tuple( + self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() + for k in self.keys() + ) + + +class TFGroupViTCrossAttentionLayer(tf.keras.layers.Layer): + def __init__(self, config: GroupViTVisionConfig, **kwargs): + super().__init__(**kwargs) + self.attn = TFGroupViTAttention(config, name="attn") + self.norm2 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm2") + self.mlp = TFGroupViTMLP(config, name="mlp") + self.norm_post = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_post") + + def call(self, query: tf.Tensor, key: tf.Tensor, training: bool = False) -> tf.Tensor: + x = query + x = x + self.attn(query, encoder_hidden_states=key)[0] + x = x + self.mlp(self.norm2(x)) + x = self.norm_post(x) + return x + + +class TFGroupViTAssignAttention(tf.keras.layers.Layer): + def __init__(self, config: GroupViTVisionConfig, **kwargs): + super().__init__(**kwargs) + self.scale = config.hidden_size**-0.5 + + self.q_proj = tf.keras.layers.Dense(config.hidden_size, name="q_proj") + self.k_proj = tf.keras.layers.Dense(config.hidden_size, name="k_proj") + self.v_proj = tf.keras.layers.Dense(config.hidden_size, name="v_proj") + self.proj = tf.keras.layers.Dense(config.hidden_size, name="proj") + self.assign_eps = config.assign_eps + + def get_attn(self, attn: tf.Tensor, gumbel: bool = True, hard: bool = True, training: bool = False) -> tf.Tensor: + + if gumbel and training: + attn = gumbel_softmax(attn, dim=-2, hard=hard) + else: + if hard: + attn = hard_softmax(attn, dim=-2) + else: + attn = stable_softmax(attn, axis=-2) + + return attn + + def call(self, query: tf.Tensor, key: tf.Tensor, training: bool = False): + value = key + # [batch_size, query_length, channels] + query = self.q_proj(query) + + # [batch_size, key_length, channels] + key = self.k_proj(key) + + # [batch_size, key_length, channels] + value = self.v_proj(value) + + # [batch_size, query_length, key_length] + raw_attn = tf.matmul(query, key, transpose_b=True) * self.scale + + attn = self.get_attn(raw_attn, training=training) + soft_attn = self.get_attn(raw_attn, training=training, gumbel=False, hard=False) + + attn = attn / (tf.math.reduce_sum(attn, axis=-1, keepdims=True) + self.assign_eps) + + out = tf.matmul(attn, value) + + out = self.proj(out) + + return out, soft_attn + + +class TFGroupViTTokenAssign(tf.keras.layers.Layer): + def __init__(self, config: GroupViTVisionConfig, num_group_token: int, num_output_group: int, **kwargs): + super().__init__(**kwargs) + self.num_output_group = num_output_group + # norm on group_tokens + self.norm_tokens = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_tokens") + assign_mlp_ratio = ( + config.assign_mlp_ratio + if isinstance(config.assign_mlp_ratio, collections.abc.Iterable) + else (config.assign_mlp_ratio, config.assign_mlp_ratio) + ) + tokens_dim, channels_dim = [int(x * config.hidden_size) for x in assign_mlp_ratio] + self.mlp_inter = TFGroupViTMixerMLP(config, num_group_token, tokens_dim, num_output_group, name="mlp_inter") + self.norm_post_tokens = tf.keras.layers.LayerNormalization( + epsilon=config.layer_norm_eps, name="norm_post_tokens" + ) + # norm on x + self.norm_x = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_x") + self.pre_assign_attn = TFGroupViTCrossAttentionLayer(config, name="pre_assign_attn") + + self.assign = TFGroupViTAssignAttention(config, name="assign") + self.norm_new_x = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_new_x") + self.mlp_channels = TFGroupViTMLP( + config, config.hidden_size, channels_dim, config.hidden_size, name="mlp_channels" + ) + + def project_group_token(self, group_tokens: tf.Tensor) -> tf.Tensor: + """ + Args: + group_tokens (tf.Tensor): group tokens, [batch_size, num_group_tokens, channels] + + Returns: + projected_group_tokens (tf.Tensor): [batch_size, num_output_groups, channels] + """ + # [B, num_output_groups, C] <- [B, num_group_tokens, C] + projected_group_tokens = self.mlp_inter(group_tokens) + projected_group_tokens = self.norm_post_tokens(projected_group_tokens) + return projected_group_tokens + + def call(self, image_tokens: tf.Tensor, group_tokens: tf.Tensor, training: bool = False): + """ + Args: + image_tokens (`tf.Tensor`): image tokens, of shape [batch_size, input_length, channels] + group_tokens (`tf.Tensor`): group tokens, [batch_size, num_group_tokens, channels] + """ + + group_tokens = self.norm_tokens(group_tokens) + image_tokens = self.norm_x(image_tokens) + # [batch_size, num_output_groups, channels] + projected_group_tokens = self.project_group_token(group_tokens) + projected_group_tokens = self.pre_assign_attn(projected_group_tokens, image_tokens) + new_image_tokens, attention = self.assign(projected_group_tokens, image_tokens) + new_image_tokens += projected_group_tokens + + new_image_tokens = new_image_tokens + self.mlp_channels(self.norm_new_x(new_image_tokens)) + + return new_image_tokens, attention + + +# Adapted from transformers.models.vit.modeling_tf_vit.TFViTPatchEmbeddings with ViT->GroupViT +class TFGroupViTPatchEmbeddings(tf.keras.layers.Layer): + """ + This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial + `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a + Transformer. + """ + + def __init__(self, config: GroupViTConfig, **kwargs): + super().__init__(**kwargs) + image_size, patch_size = config.image_size, config.patch_size + num_channels = config.num_channels + # hidden_size is a member as it will be required in the call method + self.hidden_size = config.hidden_size + + image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size) + patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size) + num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) + self.image_size = image_size + self.patch_size = patch_size + self.num_patches = num_patches + self.num_channels = num_channels + self.config = config + + self.projection = tf.keras.layers.Conv2D( + filters=self.hidden_size, + kernel_size=patch_size, + strides=patch_size, + padding="valid", + data_format="channels_last", + use_bias=True, + kernel_initializer=get_initializer(self.config.initializer_range), + bias_initializer="zeros", + name="projection", + ) + + def call( + self, pixel_values: tf.Tensor, interpolate_pos_encoding: bool = False, training: bool = False + ) -> tf.Tensor: + batch_size, num_channels, height, width = shape_list(pixel_values) + if tf.executing_eagerly() and num_channels != self.num_channels: + raise ValueError( + "Make sure that the channel dimension of the pixel values match with the one set in the configuration." + ) + if ( + not interpolate_pos_encoding + and tf.executing_eagerly() + and (height != self.image_size[0] or width != self.image_size[1]) + ): + raise ValueError( + f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})." + ) + + # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. + # So change the input format from `NCHW` to `NHWC`. + # shape = (batch_size, in_height, in_width, in_channels=num_channels) + pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1)) + + projection = self.projection(pixel_values) + + # Change the 2D spatial dimensions to a single temporal dimension. + # shape = (batch_size, num_patches, out_channels=embed_dim) + num_patches = (width // self.patch_size[1]) * (height // self.patch_size[0]) + # In the TFGroupViTVisionEmbeddings the embeddings from this layer will be layer normalized + # LayerNormalization layer needs to have static last dimension (otherwise the test_keras_save_load fails with symbolic tensors) + # This is why we have used the hidden_size in the reshape method + embeddings = tf.reshape(tensor=projection, shape=(batch_size, num_patches, self.hidden_size)) + + return embeddings + + +# Adapted from transformers.vit.modeling_tf_vit.TFViTEmbeddings +class TFGroupViTVisionEmbeddings(tf.keras.layers.Layer): + """ + Construct the position and patch embeddings. + + """ + + def __init__(self, config: GroupViTVisionConfig, **kwargs): + super().__init__(**kwargs) + + self.patch_embeddings = TFGroupViTPatchEmbeddings(config, name="patch_embeddings") + self.dropout = tf.keras.layers.Dropout(rate=config.dropout, name="dropout") + self.layernorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm") + self.config = config + + def build(self, input_shape: tf.TensorShape): + + num_patches = self.patch_embeddings.num_patches + self.position_embeddings = self.add_weight( + shape=(1, num_patches, self.config.hidden_size), + initializer="zeros", + trainable=True, + name="position_embeddings", + ) + + super().build(input_shape) + + def interpolate_pos_encoding(self, embeddings, height, width) -> tf.Tensor: + """ + This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher + resolution images. + + Source: + https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174 + """ + + batch_size, num_patches, dim = shape_list(embeddings) + num_positions = shape_list(self.position_embeddings)[1] + + if num_patches == num_positions and height == width: + return self.position_embeddings + patch_pos_embed = self.position_embeddings + h0 = height // self.config.patch_size + w0 = width // self.config.patch_size + patch_pos_embed = tf.image.resize( + images=tf.reshape( + patch_pos_embed, shape=(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim) + ), + size=(h0, w0), + method="bicubic", + ) + patch_pos_embed = tf.reshape(tensor=patch_pos_embed, shape=(1, -1, dim)) + return patch_pos_embed + + def call( + self, pixel_values: tf.Tensor, interpolate_pos_encoding: bool = False, training: bool = False + ) -> tf.Tensor: + _, _, height, width = shape_list(pixel_values) + embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding) + embeddings = self.layernorm(embeddings) + + # add positional encoding to each token + if interpolate_pos_encoding: + embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) + else: + embeddings = embeddings + self.position_embeddings + + embeddings = self.dropout(embeddings) + + return embeddings + + +# Copied from transformers.models.clip.modeling_tf_clip.TFCLIPTextEmbeddings with CLIP->GroupViT +class TFGroupViTTextEmbeddings(tf.keras.layers.Layer): + def __init__(self, config: GroupViTTextConfig, **kwargs): + super().__init__(**kwargs) + + self.embed_dim = config.hidden_size + self.vocab_size = config.vocab_size + + self.config = config + + def build(self, input_shape: tf.TensorShape): + + with tf.name_scope("token_embedding"): + self.weight = self.add_weight( + shape=(self.vocab_size, self.embed_dim), + initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range), + trainable=True, + name="weight", + ) + + with tf.name_scope("position_embedding"): + self.position_embedding = self.add_weight( + shape=(self.config.max_position_embeddings, self.embed_dim), + initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range), + trainable=True, + name="embeddings", + ) + + super().build(input_shape) + + def call( + self, + input_ids: tf.Tensor = None, + position_ids: tf.Tensor = None, + inputs_embeds: tf.Tensor = None, + ) -> tf.Tensor: + """ + Applies embedding based on inputs tensor. + + Returns: + final_embeddings (`tf.Tensor`): output embedding tensor. + """ + if input_ids is None and inputs_embeds is None: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + if inputs_embeds is None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.vocab_size})" + ), + ) + inputs_embeds = tf.gather(params=self.weight, indices=input_ids) + + input_shape = shape_list(inputs_embeds)[:-1] + + if position_ids is None: + position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) + + position_embeds = tf.gather(params=self.position_embedding, indices=position_ids) + position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1)) + final_embeddings = inputs_embeds + position_embeds + + return final_embeddings + + +class TFGroupViTStage(tf.keras.layers.Layer): + """This corresponds to the `GroupingLayer` class in the GroupViT implementation.""" + + def __init__( + self, + config: GroupViTVisionConfig, + depth: int, + num_prev_group_token: int, + num_group_token: int, + num_output_group: int, + **kwargs, + ): + super().__init__(**kwargs) + self.config = config + self.depth = depth + self.num_group_token = num_group_token + self.layers = [TFGroupViTEncoderLayer(config, name=f"layers_._{i}") for i in range(depth)] + + if num_group_token > 0: + self.downsample = TFGroupViTTokenAssign( + config=config, + num_group_token=num_group_token, + num_output_group=num_output_group, + name="downsample", + ) + else: + self.downsample = None + + if num_prev_group_token > 0 and num_group_token > 0: + self.group_projector = [ + tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="group_projector.0"), + TFGroupViTMixerMLP( + config, num_prev_group_token, config.hidden_size // 2, num_group_token, name="group_projector.1" + ), + ] + else: + self.group_projector = None + + def build(self, input_shape: tf.TensorShape): + if self.num_group_token > 0: + self.group_token = self.add_weight( + shape=(1, self.num_group_token, self.config.hidden_size), + initializer="zeros", + trainable=True, + name="group_token", + ) + else: + self.group_token = None + super().build(input_shape) + + @property + def with_group_token(self): + return self.group_token is not None + + def split_x(self, x: tf.Tensor) -> tf.Tensor: + if self.with_group_token: + return x[:, : -self.num_group_token], x[:, -self.num_group_token :] + else: + return x, None + + def concat_x(self, x: tf.Tensor, group_token: Optional[tf.Tensor] = None) -> tf.Tensor: + if group_token is None: + return x + return tf.concat([x, group_token], axis=1) + + def call( + self, + hidden_states: tf.Tensor, + prev_group_token: Optional[tf.Tensor] = None, + output_attentions: bool = False, + training: bool = False, + ) -> Tuple[tf.Tensor]: + """ + Args: + hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`tf.Tensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + `(config.encoder_attention_heads,)`. + output_attentions (`bool`, *optional*): + Whether or not to return the grouping tensors of Grouping block. + """ + if self.with_group_token: + group_token = tf.tile(self.group_token, multiples=(shape_list(hidden_states)[0], 1, 1)) + if self.group_projector is not None: + for layer in self.group_projector: + prev_group_token = layer(prev_group_token) + group_token = group_token + prev_group_token + else: + group_token = None + + x = hidden_states + + cat_x = self.concat_x(x, group_token) + for layer in self.layers: + layer_out = layer( + cat_x, + attention_mask=None, + causal_attention_mask=None, + output_attentions=None, + ) + cat_x = layer_out[0] + + x, group_token = self.split_x(cat_x) + + attention = None + if self.downsample is not None: + x, attention = self.downsample(x, group_token) + + outputs = (x, group_token) + if output_attentions: + outputs = outputs + (attention,) + + return outputs + + +class TFGroupViTMLP(tf.keras.layers.Layer): + def __init__( + self, + config: GroupViTVisionConfig, + hidden_size: Optional[int] = None, + intermediate_size: Optional[int] = None, + output_size: Optional[int] = None, + **kwargs, + ): + super().__init__(**kwargs) + self.config = config + self.activation_fn = get_tf_activation(config.hidden_act) + hidden_size = hidden_size if hidden_size is not None else config.hidden_size + intermediate_size = intermediate_size if intermediate_size is not None else config.intermediate_size + output_size = output_size if output_size is not None else hidden_size + self.fc1 = tf.keras.layers.Dense(intermediate_size, name="fc1") + self.fc2 = tf.keras.layers.Dense(output_size, name="fc2") + + def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor: + hidden_states = self.fc1(hidden_states) + hidden_states = self.activation_fn(hidden_states) + hidden_states = self.fc2(hidden_states) + return hidden_states + + +class TFGroupViTMixerMLP(TFGroupViTMLP): + def call(self, x, training: bool = False): + x = super().call(hidden_states=tf.transpose(x, perm=(0, 2, 1))) + return tf.transpose(x, perm=(0, 2, 1)) + + +# Adapted from transformers.models.clip.modeling_tf_clip.TFCLIPAttention +class TFGroupViTAttention(tf.keras.layers.Layer): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config: GroupViTConfig, **kwargs): + super().__init__(**kwargs) + + self.embed_dim = config.hidden_size + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = self.embed_dim // self.num_attention_heads + if self.attention_head_size * self.num_attention_heads != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" + f" {self.num_attention_heads})." + ) + + factor = config.initializer_factor + in_proj_std = (self.embed_dim**-0.5) * ((2 * config.num_hidden_layers) ** -0.5) * factor + out_proj_std = (self.embed_dim**-0.5) * factor + + self.sqrt_att_head_size = math.sqrt(self.attention_head_size) + + self.q_proj = tf.keras.layers.Dense( + units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="q_proj" + ) + self.k_proj = tf.keras.layers.Dense( + units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="k_proj" + ) + self.v_proj = tf.keras.layers.Dense( + units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="v_proj" + ) + + self.dropout = tf.keras.layers.Dropout(rate=config.attention_dropout) + + self.out_proj = tf.keras.layers.Dense( + units=self.embed_dim, kernel_initializer=get_initializer(out_proj_std), name="out_proj" + ) + + # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention.transpose_for_scores + def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: + # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] + tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) + + # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size] + return tf.transpose(tensor, perm=[0, 2, 1, 3]) + + def call( + self, + hidden_states: tf.Tensor, + attention_mask: tf.Tensor = None, + causal_attention_mask: tf.Tensor = None, + output_attentions: bool = None, + encoder_hidden_states: tf.Tensor = None, + training: bool = False, + ) -> Tuple[tf.Tensor]: + """Input shape: Batch x Time x Channel""" + + batch_size = shape_list(hidden_states)[0] + is_cross_attention = encoder_hidden_states is not None + + mixed_query_layer = self.q_proj(inputs=hidden_states) + if is_cross_attention: + mixed_key_layer = self.k_proj(inputs=encoder_hidden_states) + mixed_value_layer = self.v_proj(inputs=encoder_hidden_states) + else: + mixed_key_layer = self.k_proj(inputs=hidden_states) + mixed_value_layer = self.v_proj(inputs=hidden_states) + + query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) + key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) + value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) + + # Take the dot product between "query" and "key" to get the raw attention scores. + # (batch size, num_heads, seq_len_q, seq_len_k) + attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) + dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) + attention_scores = tf.divide(attention_scores, dk) + + # apply the causal_attention_mask first + if causal_attention_mask is not None: + # Apply the causal attention mask (precomputed for all layers in TFCLIPModel call() function) + attention_scores = tf.add(attention_scores, causal_attention_mask) + + if attention_mask is not None: + # Apply the attention mask (precomputed for all layers in TFCLIPModel call() function) + attention_scores = tf.add(attention_scores, attention_mask) + + # Normalize the attention scores to probabilities. + _attention_probs = stable_softmax(logits=attention_scores, axis=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(inputs=_attention_probs) + + attention_output = tf.matmul(attention_probs, value_layer) + attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3]) + + # (batch_size, seq_len_q, embed_dim) + attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.embed_dim)) + + attention_output = self.out_proj(attention_output) + # In TFBert, attention weights are returned after dropout. + # However, in CLIP, they are returned before dropout. + outputs = (attention_output, _attention_probs) if output_attentions else (attention_output,) + + return outputs + + +# Copied from transformers.models.clip.modeling_tf_clip.TFCLIPEncoderLayer with CLIP->GroupViT +class TFGroupViTEncoderLayer(tf.keras.layers.Layer): + def __init__(self, config: GroupViTConfig, **kwargs): + super().__init__(**kwargs) + + self.embed_dim = config.hidden_size + self.self_attn = TFGroupViTAttention(config, name="self_attn") + self.layer_norm1 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm1") + self.mlp = TFGroupViTMLP(config, name="mlp") + self.layer_norm2 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm2") + + def call( + self, + hidden_states: tf.Tensor, + attention_mask: tf.Tensor, + causal_attention_mask: tf.Tensor, + output_attentions: bool, + training: bool = False, + ) -> Tuple[tf.Tensor]: + """ + Args: + hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`tf.Tensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + causal_attention_mask (`tf.Tensor`): causal attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + output_attentions (`bool`): + Whether or not to return the attentions tensors of all attention layers. See `outputs` under returned + tensors for more detail. + """ + residual = hidden_states + + hidden_states = self.layer_norm1(inputs=hidden_states) + attention_outputs = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + causal_attention_mask=causal_attention_mask, + output_attentions=output_attentions, + training=training, + ) + hidden_states = attention_outputs[0] + hidden_states = residual + hidden_states + + residual = hidden_states + hidden_states = self.layer_norm2(inputs=hidden_states) + hidden_states = self.mlp(hidden_states=hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + attention_outputs[1:] # add attentions if we output them + + return outputs + + +# Adapted from transformers.models.clip.modeling_tf_clip.TFGroupViTTextEncoder +class TFGroupViTTextEncoder(tf.keras.layers.Layer): + def __init__(self, config: GroupViTTextConfig, **kwargs): + super().__init__(**kwargs) + + self.layers = [TFGroupViTEncoderLayer(config, name=f"layers_._{i}") for i in range(config.num_hidden_layers)] + + def call( + self, + hidden_states, + attention_mask: tf.Tensor, + causal_attention_mask: tf.Tensor, + output_attentions: bool, + output_hidden_states: bool, + return_dict: bool, + training: bool = False, + ) -> Union[Tuple, TFBaseModelOutput]: + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + layer_outputs = encoder_layer( + hidden_states, + attention_mask, + causal_attention_mask, + output_attentions=output_attentions, + ) + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) + return TFBaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions + ) + + +class TFGroupViTVisionEncoder(tf.keras.layers.Layer): + def __init__(self, config: GroupViTVisionConfig, **kwargs) -> None: + super().__init__(**kwargs) + + self.stages = [ + TFGroupViTStage( + config=config, + depth=config.depths[i], + num_group_token=config.num_group_tokens[i], + num_output_group=config.num_output_groups[i], + num_prev_group_token=config.num_output_groups[i - 1] if i > 0 else 0, + name=f"stages_._{i}", + ) + for i in range(len(config.depths)) + ] + + def call( + self, + hidden_states: tf.Tensor, + output_hidden_states: bool, + output_attentions: bool, + return_dict: bool, + training: bool = False, + ) -> Union[tuple, TFBaseModelOutput]: + all_hidden_states = () if output_hidden_states else None + all_groupings = () if output_attentions else None + + group_tokens = None + + for stage in self.stages: + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_outputs = stage(hidden_states, group_tokens, output_attentions) + + hidden_states = layer_outputs[0] + group_tokens = layer_outputs[1] + + if output_attentions and layer_outputs[2] is not None: + all_groupings = all_groupings + (layer_outputs[2],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states, all_groupings] if v is not None) + return TFBaseModelOutput( + last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_groupings + ) + + +# Copied from transformers.models.clip.modeling_tf_clip.TFCLIPTextTransformer with CLIPText->GroupViTText, CLIPEncoder->GroupViTTextEncoder +class TFGroupViTTextTransformer(tf.keras.layers.Layer): + def __init__(self, config: GroupViTTextConfig, **kwargs): + super().__init__(**kwargs) + + self.embeddings = TFGroupViTTextEmbeddings(config, name="embeddings") + self.encoder = TFGroupViTTextEncoder(config, name="encoder") + self.final_layer_norm = tf.keras.layers.LayerNormalization( + epsilon=config.layer_norm_eps, name="final_layer_norm" + ) + + def call( + self, + input_ids: TFModelInputType, + attention_mask: tf.Tensor, + position_ids: tf.Tensor, + output_attentions: bool, + output_hidden_states: bool, + return_dict: bool, + training: bool = False, + ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: + input_shape = shape_list(input_ids) + + embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids) + + batch_size, seq_length = input_shape + # CLIP's text model uses causal mask, prepare it here. + # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324 + causal_attention_mask = self._build_causal_attention_mask(batch_size, seq_length, dtype=embedding_output.dtype) + + # check attention mask and invert + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + attention_mask = _expand_mask(attention_mask) + + encoder_outputs = self.encoder( + hidden_states=embedding_output, + attention_mask=attention_mask, + causal_attention_mask=causal_attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + sequence_output = encoder_outputs[0] + sequence_output = self.final_layer_norm(inputs=sequence_output) + + # text_embeds.shape = [batch_size, n_ctx, transformer.width] + # take features from the eot embedding (eot_token is the highest number in each sequence) + pooled_output = tf.gather_nd( + params=sequence_output, + indices=tf.stack( + values=(tf.range(input_shape[0], dtype=tf.int64), tf.math.argmax(input_ids, axis=-1)), axis=1 + ), + ) + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return TFBaseModelOutputWithPooling( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + def _build_causal_attention_mask(self, batch_size, seq_length, dtype=tf.float32): + # It is possible with an unspecified sequence length for seq_length to be + # a runtime value, which is unsupported by tf.constant. Per the TensorFlow + # docs, tf.fill can handle runtime dynamic shapes: + # https://www.tensorflow.org/api_docs/python/tf/fill + diag = tf.cast(tf.fill((seq_length,), 0.0), dtype) + + # set an additive 2D attention mask with all places being masked + to_mask = tf.cast(tf.fill((seq_length, seq_length), -10000.0), dtype) + + # set diagonal & lower triangular parts to 0 (i.e. the places not to be masked) + # TIP: think the 2D matrix as the space of (query_seq, key_seq) + to_mask = tf.linalg.band_part(to_mask, 0, -1) + # to_mask = tf.linalg.band_part(to_mask, -1, 0) + to_mask = tf.linalg.set_diag(to_mask, diagonal=diag) + + return tf.broadcast_to(input=to_mask, shape=(batch_size, 1, seq_length, seq_length)) + + +# Adapted from transformers.models.clip.modeling_tf_clip.TFCLIPVisionTransformer +class TFGroupViTVisionTransformer(tf.keras.layers.Layer): + def __init__(self, config: GroupViTVisionConfig, **kwargs): + super().__init__(**kwargs) + + self.embeddings = TFGroupViTVisionEmbeddings(config, name="embeddings") + self.encoder = TFGroupViTVisionEncoder(config, name="encoder") + self.layernorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm") + + def call( + self, + pixel_values: TFModelInputType, + output_attentions: bool, + output_hidden_states: bool, + return_dict: bool, + training: bool = False, + ) -> Union[Tuple, TFBaseModelOutputWithPooling]: + + embedding_output = self.embeddings(pixel_values) + + encoder_outputs = self.encoder( + hidden_states=embedding_output, + output_hidden_states=output_hidden_states, + output_attentions=output_attentions, + return_dict=return_dict, + ) + + last_hidden_state = encoder_outputs[0] + + # normalize the last hidden state + last_hidden_state = self.layernorm(last_hidden_state) + pooled_output = tf.math.reduce_mean(last_hidden_state, axis=1) + + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return TFBaseModelOutputWithPooling( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + ) + + +@keras_serializable +# Copied from transformers.models.clip.modeling_tf_clip.TFCLIPTextMainLayer with CLIP->GroupViT +class TFGroupViTTextMainLayer(tf.keras.layers.Layer): + config_class = GroupViTTextConfig + + def __init__(self, config: GroupViTTextConfig, **kwargs): + super().__init__(**kwargs) + self.config = config + self.text_model = TFGroupViTTextTransformer(config, name="text_model") + + def get_input_embeddings(self) -> tf.keras.layers.Layer: + return self.text_model.embeddings + + def set_input_embeddings(self, value: tf.Variable): + self.text_model.embeddings.weight = value + self.text_model.embeddings.vocab_size = shape_list(value)[0] + + @unpack_inputs + def call( + self, + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: + if input_ids is None: + raise ValueError("You have to specify input_ids") + + input_shape = shape_list(input_ids) + + if attention_mask is None: + attention_mask = tf.fill(dims=input_shape, value=1) + + text_model_outputs = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + return text_model_outputs + + +@keras_serializable +# Copied from transformers.models.clip.modeling_tf_clip.TFCLIPVisionMainLayer with CLIP->GroupViT +class TFGroupViTVisionMainLayer(tf.keras.layers.Layer): + config_class = GroupViTVisionConfig + + def __init__(self, config: GroupViTVisionConfig, **kwargs): + super().__init__(**kwargs) + self.config = config + self.vision_model = TFGroupViTVisionTransformer(config, name="vision_model") + + def get_input_embeddings(self) -> tf.keras.layers.Layer: + return self.vision_model.embeddings + + @unpack_inputs + def call( + self, + pixel_values: Optional[TFModelInputType] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + vision_model_outputs = self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + return vision_model_outputs + + +@keras_serializable +# Adapted from transformers.models.clip.modeling_tf_clip.TFCLIPMainLayer +class TFGroupViTMainLayer(tf.keras.layers.Layer): + config_class = GroupViTConfig + + def __init__(self, config: GroupViTConfig, **kwargs): + super().__init__(**kwargs) + + if not isinstance(config.text_config, GroupViTTextConfig): + raise ValueError( + "config.text_config is expected to be of type GroupViTTextConfig but is of type" + f" {type(config.text_config)}." + ) + + if not isinstance(config.vision_config, GroupViTVisionConfig): + raise ValueError( + "config.vision_config is expected to be of type GroupViTVisionConfig but is of type" + f" {type(config.vision_config)}." + ) + + self.config = config + + text_config = config.text_config + vision_config = config.vision_config + + self.projection_dim = config.projection_dim + self.projection_intermediate_dim = config.projection_intermediate_dim + self.text_embed_dim = text_config.hidden_size + self.vision_embed_dim = vision_config.hidden_size + + self.text_model = TFGroupViTTextTransformer(text_config, name="text_model") + self.vision_model = TFGroupViTVisionTransformer(vision_config, name="vision_model") + + self.visual_projection = [ + tf.keras.layers.Dense(self.projection_intermediate_dim, name="visual_projection.0"), + tf.keras.layers.BatchNormalization(name="visual_projection.1", momentum=0.1, epsilon=1e-5), + tf.keras.layers.ReLU(name="visual_projection.2"), + tf.keras.layers.Dense(self.projection_dim, name="visual_projection.3"), + ] + self.text_projection = [ + tf.keras.layers.Dense(self.projection_intermediate_dim, name="text_projection.0"), + tf.keras.layers.BatchNormalization(name="text_projection.1", momentum=0.1, epsilon=1e-5), + tf.keras.layers.ReLU(name="text_projection.2"), + tf.keras.layers.Dense(self.projection_dim, name="text_projection.3"), + ] + + def build(self, input_shape: tf.TensorShape): + + self.logit_scale = self.add_weight( + shape=(1,), + initializer=tf.keras.initializers.Constant(self.config.logit_scale_init_value), + trainable=True, + name="logit_scale", + ) + + super().build(input_shape) + + @unpack_inputs + def get_text_features( + self, + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> tf.Tensor: + + if input_ids is None: + raise ValueError("You have to specify either input_ids") + + input_shape = shape_list(input_ids) + + if attention_mask is None: + attention_mask = tf.fill(dims=input_shape, value=1) + + text_outputs = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + pooled_output = text_outputs[1] + for layer in self.text_projection: + pooled_output = layer(pooled_output) + + text_features = pooled_output + return text_features + + @unpack_inputs + def get_image_features( + self, + pixel_values: Optional[TFModelInputType] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> tf.Tensor: + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + vision_outputs = self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + pooled_output = vision_outputs[1] + for layer in self.visual_projection: + pooled_output = layer(pooled_output) + + image_features = pooled_output + return image_features + + @unpack_inputs + def call( + self, + input_ids: Optional[TFModelInputType] = None, + pixel_values: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + return_loss: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_segmentation: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[TFGroupViTModelOutput, Tuple[tf.Tensor]]: + + if input_ids is None: + raise ValueError("You have to specify either input_ids") + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + input_shape = shape_list(input_ids) + + if attention_mask is None: + attention_mask = tf.fill(dims=input_shape, value=1) + if output_segmentation: + output_attentions = True + vision_outputs = self.vision_model( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + text_outputs = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + image_embeds = vision_outputs[1] + for layer in self.visual_projection: + image_embeds = layer(image_embeds) + + text_embeds = text_outputs[1] + for layer in self.text_projection: + text_embeds = layer(text_embeds) + + # normalized features + image_embeds = image_embeds / tf.norm(image_embeds, axis=-1, keepdims=True) + text_embeds = text_embeds / tf.norm(text_embeds, axis=-1, keepdims=True) + + # cosine similarity as logits + logit_scale = tf.math.exp(self.logit_scale) + logits_per_text = tf.matmul(text_embeds, image_embeds, transpose_b=True) * logit_scale + logits_per_image = tf.transpose(logits_per_text) + + seg_logits = None + if output_segmentation: + # grouped features + # [batch_size_image, num_group, hidden_size] + image_group_embeds = vision_outputs[0] + # [batch_size_image*num_group, hidden_size] + image_group_embeds = tf.reshape(image_group_embeds, shape=(-1, shape_list(image_group_embeds)[-1])) + for layer in self.visual_projection: + image_group_embeds = layer(image_group_embeds) + if output_hidden_states: + attentions = vision_outputs[3] + else: + attentions = vision_outputs[2] + # [batch_size_image, num_group, height, width] + grouping = get_grouping_from_attentions(attentions, pixel_values.shape[2:]) + + # normalized features + image_group_embeds = image_group_embeds / tf.norm( + tensor=image_group_embeds, ord="euclidean", axis=-1, keepdims=True + ) + # [batch_size_image x num_group, batch_size_text] + logits_per_image_group = tf.matmul(image_group_embeds, text_embeds, transpose_b=True) * logit_scale + # [batch_size_image, batch_size_text, num_group] + logits_per_image_group = tf.reshape( + logits_per_image_group, shape=(image_embeds.shape[0], -1, text_embeds.shape[0]) + ) + logits_per_image_group = tf.transpose(logits_per_image_group, perm=(0, 2, 1)) + + # [batch_size_image, batch_size_text, height x width] + flatten_grouping = tf.reshape(grouping, shape=(shape_list(grouping)[0], shape_list(grouping)[1], -1)) + + # [batch_size_image, batch_size_text, height, width] + seg_logits = tf.matmul(logits_per_image_group, flatten_grouping) * logit_scale + seg_logits = tf.reshape( + seg_logits, shape=(seg_logits.shape[0], seg_logits.shape[1], grouping.shape[2], grouping.shape[3]) + ) + + loss = None + if return_loss: + loss = groupvit_loss(logits_per_text)[None, ...] + + if not return_dict: + if seg_logits is not None: + output = ( + logits_per_image, + logits_per_text, + seg_logits, + text_embeds, + image_embeds, + text_outputs, + vision_outputs, + ) + else: + output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) + return ((loss,) + output) if loss is not None else output + + return TFGroupViTModelOutput( + loss=loss, + logits_per_image=logits_per_image, + logits_per_text=logits_per_text, + segmentation_logits=seg_logits, + text_embeds=text_embeds, + image_embeds=image_embeds, + text_model_output=text_outputs, + vision_model_output=vision_outputs, + ) + + +class TFGroupViTPreTrainedModel(TFPreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = GroupViTConfig + base_model_prefix = "groupvit" + + +GROUPVIT_START_DOCSTRING = r""" + This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it + as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and + behavior. + + + + TF 2.0 models accepts two formats as inputs: + + - having all inputs as keyword arguments (like PyTorch models), or + - having all inputs as a list, tuple or dict in the first positional arguments. + + This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the + tensors in the first argument of the model call function: `model(inputs)`. + + If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the + first positional argument : + + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` + - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: + `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` + - a dictionary with one or several input Tensors associated to the input names given in the docstring: + `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + + + + Args: + config ([`GroupViTConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +GROUPVIT_TEXT_INPUTS_DOCSTRING = r""" + Args: + input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.__call__`] and + [`PreTrainedTokenizer.encode`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the + config will be used instead. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. This argument can be used only in eager mode, in graph mode the value in the config will be + used instead. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in + eager mode, in graph mode the value will always be set to True. + training (`bool`, *optional*, defaults to `False``): + Whether or not to use the model in training mode (some modules like dropout modules have different + behaviors between training and evaluation). +""" + +GROUPVIT_VISION_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`CLIPFeatureExtractor`]. See + [`CLIPFeatureExtractor.__call__`] for details. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the + config will be used instead. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. This argument can be used only in eager mode, in graph mode the value in the config will be + used instead. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in + eager mode, in graph mode the value will always be set to True. + training (`bool`, *optional*, defaults to `False``): + Whether or not to use the model in training mode (some modules like dropout modules have different + behaviors between training and evaluation). +""" + +GROUPVIT_INPUTS_DOCSTRING = r""" + Args: + input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.__call__`] and + [`PreTrainedTokenizer.encode`] for details. + + [What are input IDs?](../glossary#input-ids) + pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`CLIPFeatureExtractor`]. See + [`CLIPFeatureExtractor.__call__`] for details. + attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + return_loss (`bool`, *optional*): + Whether or not to return the contrastive loss. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the + config will be used instead. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. This argument can be used only in eager mode, in graph mode the value in the config will be + used instead. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in + eager mode, in graph mode the value will always be set to True. + training (`bool`, *optional*, defaults to `False``): + Whether or not to use the model in training mode (some modules like dropout modules have different + behaviors between training and evaluation). +""" + + +class TFGroupViTTextModel(TFGroupViTPreTrainedModel): + config_class = GroupViTTextConfig + main_input_name = "input_ids" + + def __init__(self, config: GroupViTTextConfig, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + + self.groupvit = TFGroupViTTextMainLayer(config, name="groupvit") + + @property + def dummy_inputs(self) -> Dict[str, tf.Tensor]: + """ + Dummy inputs to build the network. + + Returns: + `Dict[str, tf.Tensor]`: The dummy inputs. + """ + return { + "input_ids": tf.constant(DUMMY_INPUTS, dtype=tf.int32), + } + + @tf.function( + input_signature=[ + { + "input_ids": tf.TensorSpec((None, None), tf.int64, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None), tf.int64, name="attention_mask"), + } + ] + ) + def serving(self, inputs: Dict[str, tf.Tensor]) -> TFBaseModelOutputWithPooling: + output = self.call(inputs) + return self.serving_output(output) + + @unpack_inputs + @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=GroupViTTextConfig) + def call( + self, + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: + r""" + Returns: + + Examples: + + ```python + >>> from transformers import CLIPTokenizer, TFGroupViTTextModel + + >>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc") + >>> model = TFGroupViTTextModel.from_pretrained("nvidia/groupvit-gcc-yfcc") + + >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf") + + >>> outputs = model(**inputs) + >>> last_hidden_state = outputs.last_hidden_state + >>> pooled_output = outputs.pooler_output # pooled (EOS token) states + ```""" + + outputs = self.groupvit( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + return outputs + + def serving_output(self, output: TFBaseModelOutputWithPooling) -> TFBaseModelOutputWithPooling: + hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None + attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None + + return TFBaseModelOutputWithPooling( + last_hidden_state=output.last_hidden_state, + pooler_output=output.pooler_output, + hidden_states=hs, + attentions=attns, + ) + + +class TFGroupViTVisionModel(TFGroupViTPreTrainedModel): + config_class = GroupViTVisionConfig + main_input_name = "pixel_values" + + def __init__(self, config: GroupViTVisionConfig, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + + self.groupvit = TFGroupViTVisionMainLayer(config, name="groupvit") + + @property + def dummy_inputs(self) -> Dict[str, tf.Tensor]: + """ + Dummy inputs to build the network. + + Returns: + `Dict[str, tf.Tensor]`: The dummy inputs. + """ + VISION_DUMMY_INPUTS = tf.random.uniform( + shape=(len(DUMMY_INPUTS), 3, self.config.image_size, self.config.image_size), dtype=tf.float32 + ) + return {"pixel_values": VISION_DUMMY_INPUTS} + + @tf.function( + input_signature=[ + { + "pixel_values": tf.TensorSpec((None, None, None, None), tf.float32, name="pixel_values"), + } + ] + ) + def serving(self, inputs: Dict[str, tf.Tensor]) -> TFBaseModelOutputWithPooling: + """ + Method used for serving the model. + + Args: + inputs (`Dict[str, tf.Tensor]`): + The input of the saved model as a dictionary of tensors. + """ + output = self.call(inputs) + + return self.serving_output(output) + + @unpack_inputs + @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=GroupViTVisionConfig) + def call( + self, + pixel_values: Optional[TFModelInputType] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]: + r""" + Returns: + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, TFGroupViTVisionModel + + >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc") + >>> model = TFGroupViTVisionModel.from_pretrained("nvidia/groupvit-gcc-yfcc") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor(images=image, return_tensors="tf") + + >>> outputs = model(**inputs) + >>> last_hidden_state = outputs.last_hidden_state + >>> pooled_output = outputs.pooler_output # pooled CLS states + ```""" + + outputs = self.groupvit( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + return outputs + + def serving_output(self, output: TFBaseModelOutputWithPooling) -> TFBaseModelOutputWithPooling: + # hidden_states and attentions not converted to Tensor with tf.convert_to_tensor as they are all of different dimensions + return TFBaseModelOutputWithPooling( + last_hidden_state=output.last_hidden_state, + pooler_output=output.pooler_output, + hidden_states=output.hidden_states, + attentions=output.attentions, + ) + + +@add_start_docstrings(GROUPVIT_START_DOCSTRING) +class TFGroupViTModel(TFGroupViTPreTrainedModel): + config_class = GroupViTConfig + + def __init__(self, config: GroupViTConfig, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + + self.groupvit = TFGroupViTMainLayer(config, name="groupvit") + + @property + def dummy_inputs(self) -> Dict[str, tf.Tensor]: + """ + Dummy inputs to build the network. + + Returns: + `Dict[str, tf.Tensor]`: The dummy inputs. + """ + VISION_DUMMY_INPUTS = tf.random.uniform( + shape=(len(DUMMY_INPUTS), 3, self.config.vision_config.image_size, self.config.vision_config.image_size), + dtype=tf.float32, + ) + return { + "input_ids": tf.constant(DUMMY_INPUTS, dtype=tf.int32), + "pixel_values": VISION_DUMMY_INPUTS, + } + + @tf.function( + input_signature=[ + { + "input_ids": tf.TensorSpec((None, None), tf.int64, name="input_ids"), + "pixel_values": tf.TensorSpec((None, None, None, None), tf.float64, name="pixel_values"), + "attention_mask": tf.TensorSpec((None, None), tf.int64, name="attention_mask"), + } + ] + ) + def serving(self, inputs: Dict[str, tf.Tensor]) -> TFGroupViTModelOutput: + """ + Method used for serving the model. + + Args: + inputs (`Dict[str, tf.Tensor]`): + The input of the saved model as a dictionary of tensors. + """ + output = self.call(inputs) + + return self.serving_output(output) + + @unpack_inputs + @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + def get_text_features( + self, + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> tf.Tensor: + r""" + Returns: + text_features (`tf.Tensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying + the projection layer to the pooled output of [`TFGroupViTTextModel`]. + + Examples: + + ```python + >>> from transformers import CLIPTokenizer, TFGroupViTModel + + >>> model = TFGroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc") + >>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc") + + >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf") + >>> text_features = model.get_text_features(**inputs) + ```""" + + text_features = self.groupvit.get_text_features( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + return text_features + + @unpack_inputs + @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING) + def get_image_features( + self, + pixel_values: Optional[TFModelInputType] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> tf.Tensor: + r""" + Returns: + image_features (`tf.Tensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying + the projection layer to the pooled output of [`TFGroupViTVisionModel`]. + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, TFGroupViTModel + + >>> model = TFGroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc") + >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor(images=image, return_tensors="tf") + + >>> image_features = model.get_image_features(**inputs) + ```""" + + image_features = self.groupvit.get_image_features( + pixel_values=pixel_values, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + return image_features + + @unpack_inputs + @add_start_docstrings_to_model_forward(GROUPVIT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @replace_return_docstrings(output_type=TFGroupViTModelOutput, config_class=GroupViTConfig) + def call( + self, + input_ids: Optional[TFModelInputType] = None, + pixel_values: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + return_loss: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_segmentation: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[TFGroupViTModelOutput, Tuple[tf.Tensor]]: + r""" + Returns: + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, TFGroupViTModel + + >>> model = TFGroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc") + >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor( + ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="tf", padding=True + ... ) + + >>> outputs = model(**inputs) + >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score + >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities + ```""" + + outputs = self.groupvit( + input_ids=input_ids, + pixel_values=pixel_values, + attention_mask=attention_mask, + position_ids=position_ids, + return_loss=return_loss, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + output_segmentation=output_segmentation, + return_dict=return_dict, + training=training, + ) + + return outputs + + def serving_output(self, output: TFGroupViTModelOutput) -> TFGroupViTModelOutput: + # TODO: As is this currently fails with saved_model=True, because + # TensorFlow cannot trace through nested dataclasses. Reference: + # https://github.com/huggingface/transformers/pull/16886 + return output diff --git a/src/transformers/utils/dummy_tf_objects.py b/src/transformers/utils/dummy_tf_objects.py index 69e11eeb31d605..3acc7804687df7 100644 --- a/src/transformers/utils/dummy_tf_objects.py +++ b/src/transformers/utils/dummy_tf_objects.py @@ -1309,6 +1309,37 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) +TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFGroupViTModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGroupViTPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGroupViTTextModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFGroupViTVisionModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/tests/models/groupvit/test_modeling_groupvit.py b/tests/models/groupvit/test_modeling_groupvit.py index bd6dbd3bc06fc1..76b9d02a872921 100644 --- a/tests/models/groupvit/test_modeling_groupvit.py +++ b/tests/models/groupvit/test_modeling_groupvit.py @@ -17,6 +17,7 @@ import inspect import os +import random import tempfile import unittest @@ -24,7 +25,7 @@ import requests from transformers import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig -from transformers.testing_utils import require_torch, require_vision, slow, torch_device +from transformers.testing_utils import is_pt_tf_cross_test, require_torch, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester @@ -95,7 +96,8 @@ def __init__( self.seq_length = num_patches def prepare_config_and_inputs(self): - pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) + rng = random.Random(0) + pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size], rng=rng) config = self.get_config() return config, pixel_values @@ -161,6 +163,18 @@ def test_config(self): def test_inputs_embeds(self): pass + @is_pt_tf_cross_test + def test_pt_tf_model_equivalence(self): + import tensorflow as tf + + seed = 338 + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + tf.random.set_seed(seed) + return super().test_pt_tf_model_equivalence() + def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() @@ -368,7 +382,8 @@ def __init__( self.scope = scope def prepare_config_and_inputs(self): - input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) + rng = random.Random(0) + input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size, rng=rng) input_mask = None if self.use_input_mask: @@ -532,6 +547,18 @@ def test_retain_grad_hidden_states_attentions(self): def test_model_common_attributes(self): pass + @is_pt_tf_cross_test + def test_pt_tf_model_equivalence(self): + import tensorflow as tf + + seed = 163 + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + tf.random.set_seed(seed) + return super().test_pt_tf_model_equivalence() + # override as the `logit_scale` parameter initilization is different for GROUPVIT def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() diff --git a/tests/models/groupvit/test_modeling_tf_groupvit.py b/tests/models/groupvit/test_modeling_tf_groupvit.py new file mode 100644 index 00000000000000..8c4053a2c735cd --- /dev/null +++ b/tests/models/groupvit/test_modeling_tf_groupvit.py @@ -0,0 +1,715 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Testing suite for the TensorFlow GroupViT model. """ + + +import inspect +import os +import random +import tempfile +import unittest +from importlib import import_module + +import numpy as np + +import requests +from transformers import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig +from transformers.testing_utils import is_pt_tf_cross_test, require_tf, require_vision, slow +from transformers.utils import is_tf_available, is_vision_available + +from ...test_configuration_common import ConfigTester +from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask + + +if is_tf_available(): + import tensorflow as tf + + from transformers import TFGroupViTModel, TFGroupViTTextModel, TFGroupViTVisionModel, TFSharedEmbeddings + from transformers.models.groupvit.modeling_tf_groupvit import TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST + + +if is_vision_available(): + from PIL import Image + + from transformers import CLIPProcessor + + +class TFGroupViTVisionModelTester: + def __init__( + self, + parent, + batch_size=12, + image_size=30, + patch_size=2, + num_channels=3, + is_training=True, + hidden_size=32, + depths=[6, 3, 3], + num_group_tokens=[64, 8, 0], + num_output_groups=[64, 8, 8], + num_attention_heads=4, + intermediate_size=37, + dropout=0.1, + attention_dropout=0.1, + initializer_range=0.02, + scope=None, + ): + self.parent = parent + self.batch_size = batch_size + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.is_training = is_training + self.hidden_size = hidden_size + self.depths = depths + self.num_hidden_layers = sum(depths) + self.expected_num_hidden_layers = len(depths) + 1 + self.num_group_tokens = num_group_tokens + self.num_output_groups = num_output_groups + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.dropout = dropout + self.attention_dropout = attention_dropout + self.initializer_range = initializer_range + self.scope = scope + + num_patches = (image_size // patch_size) ** 2 + # no [CLS] token for GroupViT + self.seq_length = num_patches + + def prepare_config_and_inputs(self): + + rng = random.Random(0) + pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size], rng=rng) + config = self.get_config() + + return config, pixel_values + + def get_config(self): + return GroupViTVisionConfig( + image_size=self.image_size, + patch_size=self.patch_size, + num_channels=self.num_channels, + hidden_size=self.hidden_size, + depths=self.depths, + num_group_tokens=self.num_group_tokens, + num_output_groups=self.num_output_groups, + num_attention_heads=self.num_attention_heads, + intermediate_size=self.intermediate_size, + dropout=self.dropout, + attention_dropout=self.attention_dropout, + initializer_range=self.initializer_range, + ) + + def create_and_check_model(self, config, pixel_values): + model = TFGroupViTVisionModel(config=config) + result = model(pixel_values, training=False) + self.parent.assertEqual( + result.last_hidden_state.shape, (self.batch_size, self.num_output_groups[-1], self.hidden_size) + ) + self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + config, pixel_values = config_and_inputs + inputs_dict = {"pixel_values": pixel_values} + return config, inputs_dict + + +@require_tf +class TFGroupViTVisionModelTest(TFModelTesterMixin, unittest.TestCase): + """ + Here we also overwrite some of the tests of test_modeling_common.py, as GroupViT does not use input_ids, inputs_embeds, + attention_mask and seq_length. + """ + + all_model_classes = (TFGroupViTVisionModel,) if is_tf_available() else () + + test_pruning = False + test_resize_embeddings = False + test_head_masking = False + test_onnx = False + + def setUp(self): + self.model_tester = TFGroupViTVisionModelTester(self) + self.config_tester = ConfigTester( + self, config_class=GroupViTVisionConfig, has_text_modality=False, hidden_size=37 + ) + + def test_config(self): + self.config_tester.run_common_tests() + + @unittest.skip(reason="GroupViT does not use inputs_embeds") + def test_inputs_embeds(self): + pass + + @unittest.skip(reason="GroupViT does not use inputs_embeds") + def test_graph_mode_with_inputs_embeds(self): + pass + + def test_model_common_attributes(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer)) + x = model.get_output_embeddings() + self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer)) + + def test_forward_signature(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + signature = inspect.signature(model.call) + # signature.parameters is an OrderedDict => so arg_names order is deterministic + arg_names = [*signature.parameters.keys()] + + expected_arg_names = ["pixel_values"] + self.assertListEqual(arg_names[:1], expected_arg_names) + + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + def test_attention_outputs(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + config.return_dict = True + + seq_len = getattr(self.model_tester, "seq_length", None) + + expected_num_attention_outputs = sum(g > 0 for g in self.model_tester.num_group_tokens) + + for model_class in self.all_model_classes: + inputs_dict["output_attentions"] = True + inputs_dict["output_hidden_states"] = False + config.return_dict = True + model = model_class(config) + outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) + attentions = outputs.attentions + # GroupViT returns attention grouping of each stage + self.assertEqual(len(attentions), sum(g > 0 for g in self.model_tester.num_group_tokens)) + + # check that output_attentions also work using config + del inputs_dict["output_attentions"] + config.output_attentions = True + model = model_class(config) + outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) + attentions = outputs.attentions + # GroupViT returns attention grouping of each stage + self.assertEqual(len(attentions), expected_num_attention_outputs) + + out_len = len(outputs) + + # Check attention is always last and order is fine + inputs_dict["output_attentions"] = True + inputs_dict["output_hidden_states"] = True + model = model_class(config) + outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) + + added_hidden_states = 1 + self.assertEqual(out_len + added_hidden_states, len(outputs)) + + self_attentions = outputs.attentions + + # GroupViT returns attention grouping of each stage + self.assertEqual(len(self_attentions), expected_num_attention_outputs) + for i, self_attn in enumerate(self_attentions): + if self_attn is None: + continue + + self.assertListEqual( + list(self_attentions[i].shape[-2:]), + [ + self.model_tester.num_output_groups[i], + self.model_tester.num_output_groups[i - 1] if i > 0 else seq_len, + ], + ) + + def test_hidden_states_output(self): + def check_hidden_states_output(inputs_dict, config, model_class): + model = model_class(config) + + outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False) + + hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states + + expected_num_layers = getattr( + self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 + ) + self.assertEqual(len(hidden_states), expected_num_layers) + + seq_length = getattr(self.model_tester, "seq_length", None) + + self.assertListEqual( + list(hidden_states[0].shape[-2:]), + [seq_length, self.model_tester.hidden_size], + ) + + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + inputs_dict["output_hidden_states"] = True + check_hidden_states_output(inputs_dict, config, model_class) + + # check that output_hidden_states also work using config + del inputs_dict["output_hidden_states"] + config.output_hidden_states = True + + check_hidden_states_output(inputs_dict, config, model_class) + + @is_pt_tf_cross_test + def test_pt_tf_model_equivalence(self): + # `GroupViT` computes some indices using argmax, uses them as + # one-hot encoding for further computation. The problem is + # while PT/TF have very small difference in `y_soft` (~ 1e-9), + # the argmax could be totally different, if there are at least + # 2 indices with almost identical values. This leads to very + # large difference in the outputs. We need specific seeds to + # avoid almost identical values happening in `y_soft`. + import torch + + seed = 338 + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + tf.random.set_seed(seed) + return super().test_pt_tf_model_equivalence() + + @slow + def test_model_from_pretrained(self): + for model_name in TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: + model = TFGroupViTVisionModel.from_pretrained(model_name) + self.assertIsNotNone(model) + + @slow + def test_saved_model_creation_extended(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + config.output_hidden_states = True + config.output_attentions = True + + if hasattr(config, "use_cache"): + config.use_cache = True + + seq_len = getattr(self.model_tester, "seq_length", None) + + for model_class in self.all_model_classes: + class_inputs_dict = self._prepare_for_class(inputs_dict, model_class) + model = model_class(config) + num_out = len(model(class_inputs_dict)) + + with tempfile.TemporaryDirectory() as tmpdirname: + model.save_pretrained(tmpdirname, saved_model=True) + saved_model_dir = os.path.join(tmpdirname, "saved_model", "1") + model = tf.keras.models.load_model(saved_model_dir) + outputs = model(class_inputs_dict) + output_hidden_states = outputs["hidden_states"] + output_attentions = outputs["attentions"] + + # Check num outputs + self.assertEqual(len(outputs), num_out) + + # Check num layers + expected_num_layers = getattr( + self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 + ) + + self.assertEqual(len(output_hidden_states), expected_num_layers) + self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers) + + # Check attention outputs + image_size = (self.model_tester.image_size, self.model_tester.image_size) + patch_size = (self.model_tester.patch_size, self.model_tester.patch_size) + num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) + seq_len = num_patches + 1 + + self.assertListEqual( + list(output_attentions[0].shape[-3:]), + [self.model_tester.num_attention_heads, seq_len, seq_len], + ) + + # Check hidden states + self.assertListEqual( + list(output_hidden_states[0].shape[-2:]), + [seq_len, self.model_tester.hidden_size], + ) + + +class TFGroupViTTextModelTester: + def __init__( + self, + parent, + batch_size=12, + seq_length=7, + is_training=True, + use_input_mask=True, + use_labels=True, + vocab_size=99, + hidden_size=32, + num_hidden_layers=5, + num_attention_heads=4, + intermediate_size=37, + dropout=0.1, + attention_dropout=0.1, + max_position_embeddings=512, + initializer_range=0.02, + scope=None, + ): + self.parent = parent + self.batch_size = batch_size + self.seq_length = seq_length + self.is_training = is_training + self.use_input_mask = use_input_mask + self.use_labels = use_labels + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.dropout = dropout + self.attention_dropout = attention_dropout + self.max_position_embeddings = max_position_embeddings + self.initializer_range = initializer_range + self.scope = scope + + def prepare_config_and_inputs(self): + rng = random.Random(0) + input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size, rng=rng) + + input_mask = None + if self.use_input_mask: + input_mask = random_attention_mask([self.batch_size, self.seq_length]) + # make sure the first token has attention mask `1` to ensure that, after combining the causal mask, there + # is still at least one token being attended to for each batch. + # TODO: Change `random_attention_mask` in PT/TF/Flax common test file, after a discussion with the team. + input_mask = tf.concat( + [tf.ones_like(input_mask[:, :1], dtype=input_mask.dtype), input_mask[:, 1:]], axis=-1 + ) + + config = self.get_config() + + return config, input_ids, input_mask + + def get_config(self): + return GroupViTTextConfig( + vocab_size=self.vocab_size, + hidden_size=self.hidden_size, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + intermediate_size=self.intermediate_size, + dropout=self.dropout, + attention_dropout=self.attention_dropout, + max_position_embeddings=self.max_position_embeddings, + initializer_range=self.initializer_range, + ) + + def create_and_check_model(self, config, input_ids, input_mask): + model = TFGroupViTTextModel(config=config) + result = model(input_ids, attention_mask=input_mask, training=False) + result = model(input_ids, training=False) + self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) + self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + config, input_ids, input_mask = config_and_inputs + inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} + return config, inputs_dict + + +@require_tf +class TFGroupViTTextModelTest(TFModelTesterMixin, unittest.TestCase): + + all_model_classes = (TFGroupViTTextModel,) if is_tf_available() else () + test_pruning = False + test_head_masking = False + test_onnx = False + + def setUp(self): + self.model_tester = TFGroupViTTextModelTester(self) + self.config_tester = ConfigTester(self, config_class=GroupViTTextConfig, hidden_size=37) + + def test_config(self): + self.config_tester.run_common_tests() + + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + @unittest.skip(reason="GroupViTTextModel does not use inputs_embeds") + def test_inputs_embeds(self): + pass + + @slow + def test_model_from_pretrained(self): + for model_name in TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: + model = TFGroupViTTextModel.from_pretrained(model_name) + self.assertIsNotNone(model) + + @slow + def test_saved_model_creation_extended(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + config.output_hidden_states = True + config.output_attentions = True + + if hasattr(config, "use_cache"): + config.use_cache = True + + for model_class in self.all_model_classes: + class_inputs_dict = self._prepare_for_class(inputs_dict, model_class) + model = model_class(config) + num_out = len(model(class_inputs_dict)) + + with tempfile.TemporaryDirectory() as tmpdirname: + model.save_pretrained(tmpdirname, saved_model=True) + saved_model_dir = os.path.join(tmpdirname, "saved_model", "1") + model = tf.keras.models.load_model(saved_model_dir) + outputs = model(class_inputs_dict) + output_hidden_states = outputs["hidden_states"] + output_attentions = outputs["attentions"] + + # Check number of outputs + self.assertEqual(len(outputs), num_out) + + # Check number of layers + expected_num_layers = getattr( + self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 + ) + + # Check hidden states + self.assertEqual(len(output_hidden_states), expected_num_layers) + self.assertListEqual( + list(output_hidden_states[0].shape[-2:]), + [self.model_tester.seq_length, self.model_tester.hidden_size], + ) + + # Check attention outputs + self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers) + + seq_length = self.model_tester.seq_length + key_length = getattr(self.model_tester, "key_length", seq_length) + + self.assertListEqual( + list(output_attentions[0].shape[-3:]), + [self.model_tester.num_attention_heads, seq_length, key_length], + ) + + +class TFGroupViTModelTester: + def __init__(self, parent, is_training=True): + self.parent = parent + self.text_model_tester = TFGroupViTTextModelTester(parent) + self.vision_model_tester = TFGroupViTVisionModelTester(parent) + self.is_training = is_training + + def prepare_config_and_inputs(self): + text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() + vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() + + config = self.get_config() + + return config, input_ids, attention_mask, pixel_values + + def get_config(self): + return GroupViTConfig.from_text_vision_configs( + self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 + ) + + def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): + model = TFGroupViTModel(config) + result = model(input_ids, pixel_values, attention_mask, training=False) + self.parent.assertEqual( + result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) + ) + self.parent.assertEqual( + result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) + ) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + config, input_ids, attention_mask, pixel_values = config_and_inputs + inputs_dict = { + "input_ids": input_ids, + "attention_mask": attention_mask, + "pixel_values": pixel_values, + "return_loss": True, + } + return config, inputs_dict + + +@require_tf +class TFGroupViTModelTest(TFModelTesterMixin, unittest.TestCase): + all_model_classes = (TFGroupViTModel,) if is_tf_available() else () + test_head_masking = False + test_pruning = False + test_resize_embeddings = False + test_attention_outputs = False + test_onnx = False + + def setUp(self): + self.model_tester = TFGroupViTModelTester(self) + + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + @unittest.skip(reason="hidden_states are tested in individual model tests") + def test_hidden_states_output(self): + pass + + @unittest.skip(reason="input_embeds are tested in individual model tests") + def test_inputs_embeds(self): + pass + + @unittest.skip(reason="CLIPModel does not have input/output embeddings") + def test_model_common_attributes(self): + pass + + @is_pt_tf_cross_test + def test_pt_tf_model_equivalence(self): + # `GroupViT` computes some indices using argmax, uses them as + # one-hot encoding for further computation. The problem is + # while PT/TF have very small difference in `y_soft` (~ 1e-9), + # the argmax could be totally different, if there are at least + # 2 indices with almost identical values. This leads to very + # large difference in the outputs. We need specific seeds to + # avoid almost identical values happening in `y_soft`. + import torch + + seed = 158 + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + tf.random.set_seed(seed) + return super().test_pt_tf_model_equivalence() + + # overwrite from common since `TFGroupViTModelTester` set `return_loss` to `True` and causes the preparation of + # `symbolic_inputs` failed. + def test_keras_save_load(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + # remove `return_loss` to make code work + if self.__class__.__name__ == "TFGroupViTModelTest": + inputs_dict.pop("return_loss", None) + + tf_main_layer_classes = set( + module_member + for model_class in self.all_model_classes + for module in (import_module(model_class.__module__),) + for module_member_name in dir(module) + if module_member_name.endswith("MainLayer") + # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. + and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")] + for module_member in (getattr(module, module_member_name),) + if isinstance(module_member, type) + and tf.keras.layers.Layer in module_member.__bases__ + and getattr(module_member, "_keras_serializable", False) + ) + for main_layer_class in tf_main_layer_classes: + # T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter + if "T5" in main_layer_class.__name__: + # Take the same values than in TFT5ModelTester for this shared layer + shared = TFSharedEmbeddings(99, 32, name="shared") + config.use_cache = inputs_dict.pop("use_cache", None) + main_layer = main_layer_class(config, embed_tokens=shared) + else: + main_layer = main_layer_class(config) + + symbolic_inputs = { + name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items() + } + + model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs)) + outputs = model(inputs_dict) + + with tempfile.TemporaryDirectory() as tmpdirname: + filepath = os.path.join(tmpdirname, "keras_model.h5") + model.save(filepath) + if "T5" in main_layer_class.__name__: + model = tf.keras.models.load_model( + filepath, + custom_objects={ + main_layer_class.__name__: main_layer_class, + "TFSharedEmbeddings": TFSharedEmbeddings, + }, + ) + else: + model = tf.keras.models.load_model( + filepath, custom_objects={main_layer_class.__name__: main_layer_class} + ) + assert isinstance(model, tf.keras.Model) + after_outputs = model(inputs_dict) + self.assert_outputs_same(after_outputs, outputs) + + @slow + def test_model_from_pretrained(self): + for model_name in TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: + model = TFGroupViTModel.from_pretrained(model_name) + self.assertIsNotNone(model) + + @unittest.skip(reason="Currently `saved_model` doesn't work with nested outputs.") + @slow + def test_saved_model_creation(self): + pass + + @unittest.skip(reason="Currently `saved_model` doesn't work with nested outputs.") + @slow + def test_saved_model_creation_extended(self): + pass + + @unittest.skip(reason="`saved_model` doesn't work with nested outputs so no preparation happens.") + @slow + def test_prepare_serving_output(self): + pass + + +# We will verify our results on an image of cute cats +def prepare_img(): + url = "http://images.cocodataset.org/val2017/000000039769.jpg" + im = Image.open(requests.get(url, stream=True).raw) + return im + + +@require_vision +@require_tf +class TFGroupViTModelIntegrationTest(unittest.TestCase): + @slow + def test_inference(self): + model_name = "nvidia/groupvit-gcc-yfcc" + model = TFGroupViTModel.from_pretrained(model_name) + processor = CLIPProcessor.from_pretrained(model_name) + + image = prepare_img() + inputs = processor( + text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="tf" + ) + + outputs = model(**inputs, training=False) + + # verify the logits + self.assertEqual( + outputs.logits_per_image.shape, + tf.TensorShape((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), + ) + self.assertEqual( + outputs.logits_per_text.shape, + tf.TensorShape((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), + ) + + expected_logits = tf.constant([[13.3523, 6.3629]]) + + tf.debugging.assert_near(outputs.logits_per_image, expected_logits, atol=1e-3) diff --git a/tests/test_modeling_tf_common.py b/tests/test_modeling_tf_common.py index 9977578b51b068..b93d8f17e4e11d 100644 --- a/tests/test_modeling_tf_common.py +++ b/tests/test_modeling_tf_common.py @@ -757,7 +757,7 @@ def test_compile_tf_model(self): name="pixel_values", dtype="float32", ) - elif model_class.__name__ in ["TFCLIPModel"]: + elif model_class.__name__ in ["TFCLIPModel", "TFGroupViTModel"]: inputs = { "input_ids": tf.keras.Input(batch_shape=(3, max_input), name="input_ids", dtype="int32"), "pixel_values": tf.keras.Input( diff --git a/utils/check_repo.py b/utils/check_repo.py index 988967e797d12e..5a6e4bd24347c2 100644 --- a/utils/check_repo.py +++ b/utils/check_repo.py @@ -163,6 +163,8 @@ "GroupViTVisionModel", "TFCLIPTextModel", "TFCLIPVisionModel", + "TFGroupViTTextModel", + "TFGroupViTVisionModel", "FlaxCLIPTextModel", "FlaxCLIPVisionModel", "FlaxWav2Vec2ForCTC", diff --git a/utils/documentation_tests.txt b/utils/documentation_tests.txt index 48fc71d6f6b2ef..eb1570d6c3145a 100644 --- a/utils/documentation_tests.txt +++ b/utils/documentation_tests.txt @@ -39,6 +39,8 @@ src/transformers/models/electra/modeling_tf_electra.py src/transformers/models/glpn/modeling_glpn.py src/transformers/models/gpt2/modeling_gpt2.py src/transformers/models/gptj/modeling_gptj.py +src/transformers/models/groupvit/modeling_groupvit.py +src/transformers/models/groupvit/modeling_tf_groupvit.py src/transformers/models/hubert/modeling_hubert.py src/transformers/models/layoutlm/modeling_layoutlm.py src/transformers/models/layoutlm/modeling_tf_layoutlm.py From ba9e336fa3e388581c081b529967ad46bc1c50d9 Mon Sep 17 00:00:00 2001 From: mustapha ajeghrir <66799406+Mustapha-AJEGHRIR@users.noreply.github.com> Date: Thu, 29 Sep 2022 13:27:58 +0200 Subject: [PATCH 413/539] Fix `m2m_100.mdx` doc example missing `labels` (#19149) The `labels` variable is not defined, the `model_inputs` already contain this information. --- docs/source/en/model_doc/m2m_100.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/en/model_doc/m2m_100.mdx b/docs/source/en/model_doc/m2m_100.mdx index f0a7714d2418d2..10ac6a9df918cd 100644 --- a/docs/source/en/model_doc/m2m_100.mdx +++ b/docs/source/en/model_doc/m2m_100.mdx @@ -57,7 +57,7 @@ tgt_text = "La vie est comme une boîte de chocolat." model_inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt") -loss = model(**model_inputs, labels=labels) # forward pass +loss = model(**model_inputs).loss # forward pass ``` - Generation From 3a27ba3d18b9691926b296b0a6a483313b0299ba Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Thu, 29 Sep 2022 13:40:55 +0200 Subject: [PATCH 414/539] Fix opt softmax small nit (#19243) * fix opt softmax nit - Use the same logic as 1eb09537550734a783c194e416029cb9bc4cb119 for consistency * Update src/transformers/models/opt/modeling_opt.py --- src/transformers/models/opt/modeling_opt.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/transformers/models/opt/modeling_opt.py b/src/transformers/models/opt/modeling_opt.py index 567e08d36f3939..9ede3cabb8f762 100644 --- a/src/transformers/models/opt/modeling_opt.py +++ b/src/transformers/models/opt/modeling_opt.py @@ -218,11 +218,10 @@ def forward( attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min)) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) - dtype_attn_weights = attn_weights.dtype # upcast to fp32 if the weights are in fp16. Please see https://github.com/huggingface/transformers/pull/17437 - if dtype_attn_weights == torch.float16: - attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(dtype_attn_weights) + if attn_weights.dtype == torch.float16: + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(torch.float16) else: attn_weights = nn.functional.softmax(attn_weights, dim=-1) From 902d30b31a5d0f4c3c7359e526552de9fb841b00 Mon Sep 17 00:00:00 2001 From: Lucain Date: Thu, 29 Sep 2022 14:58:39 +0200 Subject: [PATCH 415/539] Use `hf_raise_for_status` instead of deprecated `_raise_for_status` (#19244) * Use instead of from huggingface_hub * bump huggingface_hub to 0.10.0 + make deps_table_update --- setup.py | 2 +- src/transformers/dependency_versions_table.py | 2 +- src/transformers/utils/hub.py | 5 +++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/setup.py b/setup.py index 4d7b4e85382621..f2e533ce98710e 100644 --- a/setup.py +++ b/setup.py @@ -116,7 +116,7 @@ "fugashi>=1.0", "GitPython<3.1.19", "hf-doc-builder>=0.3.0", - "huggingface-hub>=0.9.0,<1.0", + "huggingface-hub>=0.10.0,<1.0", "importlib_metadata", "ipadic>=1.0.0,<2.0", "isort>=5.5.4", diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py index bfcb0fc8699b1e..fae26de7bbab0a 100644 --- a/src/transformers/dependency_versions_table.py +++ b/src/transformers/dependency_versions_table.py @@ -22,7 +22,7 @@ "fugashi": "fugashi>=1.0", "GitPython": "GitPython<3.1.19", "hf-doc-builder": "hf-doc-builder>=0.3.0", - "huggingface-hub": "huggingface-hub>=0.9.0,<1.0", + "huggingface-hub": "huggingface-hub>=0.10.0,<1.0", "importlib_metadata": "importlib_metadata", "ipadic": "ipadic>=1.0.0,<2.0", "isort": "isort>=5.5.4", diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py index 8c149bec648900..2d24b233f8d015 100644 --- a/src/transformers/utils/hub.py +++ b/src/transformers/utils/hub.py @@ -45,6 +45,7 @@ LocalEntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, + hf_raise_for_status, ) from requests.exceptions import HTTPError from transformers.utils.logging import tqdm @@ -607,7 +608,7 @@ def has_file( r = requests.head(url, headers=headers, allow_redirects=False, proxies=proxies, timeout=10) try: - huggingface_hub.utils._errors._raise_for_status(r) + hf_raise_for_status(r) return True except RepositoryNotFoundError as e: logger.error(e) @@ -993,7 +994,7 @@ def get_hub_metadata(url, token=None): r = huggingface_hub.file_download._request_with_retry( method="HEAD", url=url, headers=headers, allow_redirects=False ) - huggingface_hub.file_download._raise_for_status(r) + hf_raise_for_status(r) commit_hash = r.headers.get(HUGGINGFACE_HEADER_X_REPO_COMMIT) etag = r.headers.get(HUGGINGFACE_HEADER_X_LINKED_ETAG) or r.headers.get("ETag") if etag is not None: From b79028f0b69886e2a9238482c848aa5556d890b2 Mon Sep 17 00:00:00 2001 From: atturaioe <76523524+atturaioe@users.noreply.github.com> Date: Thu, 29 Sep 2022 16:13:56 +0300 Subject: [PATCH 416/539] Fix TrainingArgs argument serialization (#19239) --- src/transformers/training_args.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/src/transformers/training_args.py b/src/transformers/training_args.py index 4eb8f47e14ef60..170315fe2d49ed 100644 --- a/src/transformers/training_args.py +++ b/src/transformers/training_args.py @@ -17,7 +17,7 @@ import math import os import warnings -from dataclasses import asdict, dataclass, field +from dataclasses import asdict, dataclass, field, fields from datetime import timedelta from enum import Enum from pathlib import Path @@ -1000,10 +1000,6 @@ def __post_init__(self): if env_local_rank != -1 and env_local_rank != self.local_rank: self.local_rank = env_local_rank - # convert to int - self.log_level = trainer_log_levels[self.log_level] - self.log_level_replica = trainer_log_levels[self.log_level_replica] - # expand paths, if not os.makedirs("~/bar") will make directory # in the current directory instead of the actual home #  see https://github.com/huggingface/transformers/issues/10628 @@ -1604,8 +1600,12 @@ def get_process_log_level(self): The choice between the main and replica process settings is made according to the return value of `should_log`. """ - log_level_main_node = logging.INFO if self.log_level == -1 else self.log_level - log_level_replica_node = logging.WARNING if self.log_level_replica == -1 else self.log_level_replica + # convert to int + log_level = trainer_log_levels[self.log_level] + log_level_replica = trainer_log_levels[self.log_level_replica] + + log_level_main_node = logging.INFO if log_level == -1 else log_level + log_level_replica_node = logging.WARNING if log_level_replica == -1 else log_level_replica return log_level_main_node if self.should_log else log_level_replica_node @property @@ -1691,7 +1691,9 @@ def to_dict(self): Serializes this instance while replace `Enum` by their values (for JSON serialization support). It obfuscates the token values by removing their value. """ - d = asdict(self) + # filter out fields that are defined as field(init=False) + d = dict((field.name, getattr(self, field.name)) for field in fields(self) if field.init) + for k, v in d.items(): if isinstance(v, Enum): d[k] = v.value From 655f72a6896c0533b1bdee519ed65a059c2425ac Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Thu, 29 Sep 2022 09:36:42 -0400 Subject: [PATCH 417/539] Fix test fetching for examples (#19237) * Fix test fetching for examples * Fake example modif * Debug statements * Typo * You need to persist the file... * Revert change in example * Remove debug statements --- .circleci/config.yml | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 0143cdeb77a5e0..06c621621f6a2d 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -83,11 +83,21 @@ jobs: else touch test_preparation/test_list.txt fi + - run: python utils/tests_fetcher.py --filters tests examples | tee examples_tests_fetched_summary.txt + - store_artifacts: + path: ~/transformers/examples_tests_fetched_summary.txt + - run: | + if [ -f test_list.txt ]; then + mv test_list.txt test_preparation/examples_test_list.txt + else + touch test_preparation/examples_test_list.txt + fi - persist_to_workspace: root: test_preparation/ paths: test_list.txt + examples_test_list.txt # To run all tests for the nightly build fetch_all_tests: @@ -99,6 +109,7 @@ jobs: - run: | mkdir test_preparation echo "tests" > test_preparation/test_list.txt + echo "tests" > test_preparation/examples_test_list.txt - persist_to_workspace: root: test_preparation/ @@ -426,7 +437,7 @@ jobs: - attach_workspace: at: ~/transformers/test_preparation - run: | - if [ ! -s test_preparation/test_list.txt ]; then + if [ ! -s test_preparation/examples_test_list.txt ]; then echo "No tests to run, exiting early!" circleci-agent step halt fi @@ -463,7 +474,7 @@ jobs: - attach_workspace: at: ~/transformers/test_preparation - run: | - if [ ! -s test_preparation/test_list.txt ]; then + if [ ! -s test_preparation/examples_test_list.txt ]; then echo "No tests to run, exiting early!" circleci-agent step halt fi @@ -499,7 +510,7 @@ jobs: - attach_workspace: at: ~/transformers/test_preparation - run: | - if [ ! -s test_preparation/test_list.txt ]; then + if [ ! -s test_preparation/examples_test_list.txt ]; then echo "No tests to run, exiting early!" circleci-agent step halt fi From 01eb34ab45a8895fbd9e335568290e5d0f5f4491 Mon Sep 17 00:00:00 2001 From: Alara Dirik <8944735+alaradirik@users.noreply.github.com> Date: Thu, 29 Sep 2022 17:33:13 +0300 Subject: [PATCH 418/539] Improve DETR post-processing methods (#19205) * Ensures consistent arguments and outputs with other post-processing methods * Adds post_process_semantic_segmentation, post_process_instance_segmentation, post_process_panoptic_segmentation, post_process_object_detection methods to DetrFeatureExtractor * Adds deprecation warnings to post_process, post_process_segmentation and post_process_panoptic --- .../feature_extraction_conditional_detr.py | 18 +- .../modeling_conditional_detr.py | 15 +- .../feature_extraction_deformable_detr.py | 18 +- .../models/detr/feature_extraction_detr.py | 423 +++++++++++++++++- src/transformers/models/detr/modeling_detr.py | 37 +- .../models/yolos/feature_extraction_yolos.py | 23 +- 6 files changed, 502 insertions(+), 32 deletions(-) diff --git a/src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py b/src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py index 4467f2900bb5ee..3d61b81fc794c7 100644 --- a/src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py +++ b/src/transformers/models/conditional_detr/feature_extraction_conditional_detr.py @@ -16,6 +16,7 @@ import io import pathlib +import warnings from collections import defaultdict from typing import Dict, List, Optional, Union @@ -555,7 +556,7 @@ def __call__( if annotations is not None: annotations = [annotations] - # Create deep copies to avoid editing inputs in place + # Create a copy of the list to avoid editing it in place images = [image for image in images] if annotations is not None: @@ -753,6 +754,11 @@ def post_process_segmentation(self, outputs, target_sizes, threshold=0.9, mask_t `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, and masks for an image in the batch as predicted by the model. """ + warnings.warn( + "`post_process_segmentation` is deprecated and will be removed in v5 of Transformers, please use" + " `post_process_semantic_segmentation`.", + FutureWarning, + ) out_logits, raw_masks = outputs.logits, outputs.pred_masks preds = [] @@ -801,6 +807,11 @@ def post_process_instance(self, results, outputs, orig_target_sizes, max_target_ `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, boxes and masks for an image in the batch as predicted by the model. """ + warnings.warn( + "`post_process_instance` is deprecated and will be removed in v5 of Transformers, please use" + " `post_process_instance_segmentation`.", + FutureWarning, + ) if len(orig_target_sizes) != len(max_target_sizes): raise ValueError("Make sure to pass in as many orig_target_sizes as max_target_sizes") @@ -845,6 +856,11 @@ def post_process_panoptic(self, outputs, processed_sizes, target_sizes=None, is_ `List[Dict]`: A list of dictionaries, each dictionary containing a PNG string and segments_info values for an image in the batch as predicted by the model. """ + warnings.warn( + "`post_process_panoptic is deprecated and will be removed in v5 of Transformers, please use" + " `post_process_panoptic_segmentation`.", + FutureWarning, + ) if target_sizes is None: target_sizes = processed_sizes if len(processed_sizes) != len(target_sizes): diff --git a/src/transformers/models/conditional_detr/modeling_conditional_detr.py b/src/transformers/models/conditional_detr/modeling_conditional_detr.py index 626f19010653f0..7d3cefbfb1eadb 100644 --- a/src/transformers/models/conditional_detr/modeling_conditional_detr.py +++ b/src/transformers/models/conditional_detr/modeling_conditional_detr.py @@ -153,8 +153,8 @@ class ConditionalDetrObjectDetectionOutput(ModelOutput): pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding - possible padding). You can use [`~ConditionalDetrFeatureExtractor.post_process`] to retrieve the - unnormalized bounding boxes. + possible padding). You can use [`~ConditionalDetrFeatureExtractor.post_process_object_detection`] to + retrieve the unnormalized bounding boxes. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxilary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and @@ -217,13 +217,14 @@ class ConditionalDetrSegmentationOutput(ModelOutput): pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding - possible padding). You can use [`~ConditionalDetrFeatureExtractor.post_process`] to retrieve the - unnormalized bounding boxes. + possible padding). You can use [`~ConditionalDetrFeatureExtractor.post_process_object_detection`] to + retrieve the unnormalized bounding boxes. pred_masks (`torch.FloatTensor` of shape `(batch_size, num_queries, height/4, width/4)`): Segmentation masks logits for all queries. See also - [`~ConditionalDetrFeatureExtractor.post_process_segmentation`] or - [`~ConditionalDetrFeatureExtractor.post_process_panoptic`] to evaluate instance and panoptic segmentation - masks respectively. + [`~ConditionalDetrFeatureExtractor.post_process_semantic_segmentation`] or + [`~ConditionalDetrFeatureExtractor.post_process_instance_segmentation`] + [`~ConditionalDetrFeatureExtractor.post_process_panoptic_segmentation`] to evaluate semantic, instance and + panoptic segmentation masks respectively. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and diff --git a/src/transformers/models/deformable_detr/feature_extraction_deformable_detr.py b/src/transformers/models/deformable_detr/feature_extraction_deformable_detr.py index 61e7a70d9f1c23..85be73c01e3810 100644 --- a/src/transformers/models/deformable_detr/feature_extraction_deformable_detr.py +++ b/src/transformers/models/deformable_detr/feature_extraction_deformable_detr.py @@ -16,6 +16,7 @@ import io import pathlib +import warnings from collections import defaultdict from typing import Dict, List, Optional, Union @@ -555,7 +556,7 @@ def __call__( if annotations is not None: annotations = [annotations] - # Create deep copies to avoid editing inputs in place + # Create a copy of the list to avoid editing it in place images = [image for image in images] if annotations is not None: @@ -750,6 +751,11 @@ def post_process_segmentation(self, outputs, target_sizes, threshold=0.9, mask_t `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, and masks for an image in the batch as predicted by the model. """ + warnings.warn( + "`post_process_segmentation` is deprecated and will be removed in v5 of Transformers, please use" + " `post_process_semantic_segmentation`.", + FutureWarning, + ) out_logits, raw_masks = outputs.logits, outputs.pred_masks preds = [] @@ -798,6 +804,11 @@ def post_process_instance(self, results, outputs, orig_target_sizes, max_target_ `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, boxes and masks for an image in the batch as predicted by the model. """ + warnings.warn( + "`post_process_instance` is deprecated and will be removed in v5 of Transformers, please use" + " `post_process_instance_segmentation`.", + FutureWarning, + ) if len(orig_target_sizes) != len(max_target_sizes): raise ValueError("Make sure to pass in as many orig_target_sizes as max_target_sizes") @@ -842,6 +853,11 @@ def post_process_panoptic(self, outputs, processed_sizes, target_sizes=None, is_ `List[Dict]`: A list of dictionaries, each dictionary containing a PNG string and segments_info values for an image in the batch as predicted by the model. """ + warnings.warn( + "`post_process_panoptic is deprecated and will be removed in v5 of Transformers, please use" + " `post_process_panoptic_segmentation`.", + FutureWarning, + ) if target_sizes is None: target_sizes = processed_sizes if len(processed_sizes) != len(target_sizes): diff --git a/src/transformers/models/detr/feature_extraction_detr.py b/src/transformers/models/detr/feature_extraction_detr.py index 2c0c68991be5d1..3ede3662a17064 100644 --- a/src/transformers/models/detr/feature_extraction_detr.py +++ b/src/transformers/models/detr/feature_extraction_detr.py @@ -16,8 +16,9 @@ import io import pathlib +import warnings from collections import defaultdict -from typing import Dict, List, Optional, Union +from typing import Dict, List, Optional, Set, Tuple, Union import numpy as np from PIL import Image @@ -119,6 +120,54 @@ def id_to_rgb(id_map): return color +def binary_mask_to_rle(mask): + """ + Args: + Converts given binary mask of shape (height, width) to the run-length encoding (RLE) format. + mask (`torch.Tensor` or `numpy.array`): + A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target + segment_id or class_id. + Returns: + `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE + format. + """ + if is_torch_tensor(mask): + mask = mask.numpy() + + pixels = mask.flatten() + pixels = np.concatenate([[0], pixels, [0]]) + runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 + runs[1::2] -= runs[::2] + return [x for x in runs] + + +def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): + """ + Args: + Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` + and `labels`. + masks (`torch.Tensor`): + A tensor of shape `(num_queries, height, width)`. + scores (`torch.Tensor`): + A tensor of shape `(num_queries)`. + labels (`torch.Tensor`): + A tensor of shape `(num_queries)`. + object_mask_threshold (`float`): + A number between 0 and 1 used to binarize the masks. + Raises: + `ValueError`: Raised when the first dimension doesn't match in all input tensors. + Returns: + `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region + < `object_mask_threshold`. + """ + if not (masks.shape[0] == scores.shape[0] == labels.shape[0]): + raise ValueError("mask, scores and labels must have the same shape!") + + to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) + + return masks[to_keep], scores[to_keep], labels[to_keep] + + class DetrFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin): r""" Constructs a DETR feature extractor. @@ -547,7 +596,7 @@ def __call__( if annotations is not None: annotations = [annotations] - # Create deep copies to avoid editing inputs in place + # Create a copy of the list to avoid editing it in place images = [image for image in images] if annotations is not None: @@ -699,6 +748,12 @@ def post_process(self, outputs, target_sizes): `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. """ + warnings.warn( + "`post_process` is deprecated and will be removed in v5 of Transformers, please use" + " `post_process_object_detection`", + FutureWarning, + ) + out_logits, out_bbox = outputs.logits, outputs.pred_boxes if len(out_logits) != len(target_sizes): @@ -717,7 +772,6 @@ def post_process(self, outputs, target_sizes): boxes = boxes * scale_fct[:, None, :] results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)] - return results def post_process_segmentation(self, outputs, target_sizes, threshold=0.9, mask_threshold=0.5): @@ -738,6 +792,11 @@ def post_process_segmentation(self, outputs, target_sizes, threshold=0.9, mask_t `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, and masks for an image in the batch as predicted by the model. """ + warnings.warn( + "`post_process_segmentation` is deprecated and will be removed in v5 of Transformers, please use" + " `post_process_semantic_segmentation`.", + FutureWarning, + ) out_logits, raw_masks = outputs.logits, outputs.pred_masks preds = [] @@ -786,6 +845,11 @@ def post_process_instance(self, results, outputs, orig_target_sizes, max_target_ `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, boxes and masks for an image in the batch as predicted by the model. """ + warnings.warn( + "`post_process_instance` is deprecated and will be removed in v5 of Transformers, please use" + " `post_process_instance_segmentation`.", + FutureWarning, + ) if len(orig_target_sizes) != len(max_target_sizes): raise ValueError("Make sure to pass in as many orig_target_sizes as max_target_sizes") @@ -829,6 +893,11 @@ def post_process_panoptic(self, outputs, processed_sizes, target_sizes=None, is_ `List[Dict]`: A list of dictionaries, each dictionary containing a PNG string and segments_info values for an image in the batch as predicted by the model. """ + warnings.warn( + "`post_process_panoptic is deprecated and will be removed in v5 of Transformers, please use" + " `post_process_panoptic_segmentation`.", + FutureWarning, + ) if target_sizes is None: target_sizes = processed_sizes if len(processed_sizes) != len(target_sizes): @@ -939,3 +1008,351 @@ def get_ids_area(masks, scores, dedup=False): predictions = {"png_string": out.getvalue(), "segments_info": segments_info} preds.append(predictions) return preds + + def post_process_object_detection( + self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None + ): + """ + Converts the output of [`DetrForObjectDetection`] into the format expected by the COCO api. Only supports + PyTorch. + + Args: + outputs ([`DetrObjectDetectionOutput`]): + Raw outputs of the model. + threshold (`float`, *optional*): + Score threshold to keep object detection predictions. + target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*, defaults to `None`): + Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size + (height, width) of each image in the batch. If left to None, predictions will not be resized. + + Returns: + `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image + in the batch as predicted by the model. + """ + out_logits, out_bbox = outputs.logits, outputs.pred_boxes + + if target_sizes is not None: + if len(out_logits) != len(target_sizes): + raise ValueError( + "Make sure that you pass in as many target sizes as the batch dimension of the logits" + ) + + prob = nn.functional.softmax(out_logits, -1) + scores, labels = prob[..., :-1].max(-1) + + # Convert to [x0, y0, x1, y1] format + boxes = center_to_corners_format(out_bbox) + + # Convert from relative [0, 1] to absolute [0, height] coordinates + if target_sizes is not None: + if isinstance(target_sizes, List): + img_h = torch.Tensor([i[0] for i in target_sizes]) + img_w = torch.Tensor([i[1] for i in target_sizes]) + else: + img_h, img_w = target_sizes.unbind(1) + + scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) + boxes = boxes * scale_fct[:, None, :] + + results = [] + for s, l, b in zip(scores, labels, boxes): + score = s[s > threshold] + label = l[s > threshold] + box = b[s > threshold] + results.append({"scores": score, "labels": label, "boxes": box}) + + return results + + def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple[int, int]] = None): + """ + Args: + Converts the output of [`DetrForSegmentation`] into semantic segmentation maps. Only supports PyTorch. + outputs ([`DetrForSegmentation`]): + Raw outputs of the model. + target_sizes (`List[Tuple[int, int]]`, *optional*, defaults to `None`): + A list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the + batch. If left to None, predictions will not be resized. + Returns: + `List[torch.Tensor]`: + A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) + corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each + `torch.Tensor` correspond to a semantic class id. + """ + class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] + masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] + + # Remove the null class `[..., :-1]` + masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] + masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] + + # Semantic segmentation logits of shape (batch_size, num_classes, height, width) + segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) + batch_size = class_queries_logits.shape[0] + + # Resize logits and compute semantic segmentation maps + if target_sizes is not None: + if batch_size != len(target_sizes): + raise ValueError( + "Make sure that you pass in as many target sizes as the batch dimension of the logits" + ) + + semantic_segmentation = [] + for idx in range(batch_size): + resized_logits = torch.nn.functional.interpolate( + segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False + ) + semantic_map = resized_logits[0].argmax(dim=0) + semantic_segmentation.append(semantic_map) + else: + semantic_segmentation = segmentation.argmax(dim=1) + semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] + + return semantic_segmentation + + def post_process_instance_segmentation( + self, + outputs, + threshold: float = 0.5, + overlap_mask_area_threshold: float = 0.8, + target_sizes: List[Tuple] = None, + return_coco_annotation: Optional[bool] = False, + ): + """ + Args: + Converts the output of [`DetrForSegmentation`] into instance segmentation predictions. Only supports PyTorch. + outputs ([`DetrForSegmentation`]): + Raw outputs of the model. + threshold (`float`, *optional*): + The probability score threshold to keep predicted instance masks, defaults to 0.5. + overlap_mask_area_threshold (`float`, *optional*): + The overlap mask area threshold to merge or discard small disconnected parts within each binary + instance mask, defaults to 0.8. + target_sizes (`List[Tuple]`, *optional*, defaults to `None`): + List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested + final size (height, width) of each prediction. If left to None, predictions will not be resized. + return_coco_annotation (`bool`, *optional*, defaults to `False`): + If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format. + Returns: + `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: + - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id` or + `List[List]` run-length encoding (RLE) of the segmentation map if return_coco_format is set to `True`. + - **segment_ids** -- A dictionary that maps segment ids to semantic class ids. + - **id** -- An integer representing the `segment_id`. + - **label_id** -- An integer representing the segment's label / semantic class id. + """ + class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] + masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] + + batch_size = class_queries_logits.shape[0] + num_labels = class_queries_logits.shape[-1] - 1 + + mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] + + # Predicted label and score of each query (batch_size, num_queries) + pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) + + # Loop over items in batch size + results: List[Dict[str, TensorType]] = [] + + for i in range(batch_size): + mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( + mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels + ) + + height, width = target_sizes[i][0], target_sizes[i][1] + segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs_item.device) + segments: List[Dict] = [] + + object_detected = mask_probs_item.shape[0] > 0 + + if object_detected: + # Resize mask to corresponding target_size + if target_sizes is not None: + mask_probs_item = torch.nn.functional.interpolate( + mask_probs_item.unsqueeze(0), + size=target_sizes[i], + mode="bilinear", + align_corners=False, + )[0] + + current_segment_id = 0 + + # Weigh each mask by its prediction score + mask_probs_item *= pred_scores_item.view(-1, 1, 1) + mask_labels_item = mask_probs_item.argmax(0) # [height, width] + + # Keep track of instances of each class + stuff_memory_list: Dict[str, int] = {} + for k in range(pred_labels_item.shape[0]): + # Get the mask associated with the k class + pred_class = pred_labels_item[k].item() + mask_k = mask_labels_item == k + mask_k_area = mask_k.sum() + + # Compute the area of all the stuff in query k + original_area = (mask_probs_item[k] >= 0.5).sum() + mask_exists = mask_k_area > 0 and original_area > 0 + + if mask_exists: + # Eliminate segments with mask area below threshold + area_ratio = mask_k_area / original_area + if not area_ratio.item() > overlap_mask_area_threshold: + continue + + # Add corresponding class id + if pred_class in stuff_memory_list: + current_segment_id = stuff_memory_list[pred_class] + else: + current_segment_id += 1 + + # Add current object segment to final segmentation map + segmentation[mask_k] = current_segment_id + segments.append( + { + "id": current_segment_id, + "label_id": pred_class, + } + ) + else: + segmentation -= 1 + + # Return segmentation map in run-length encoding (RLE) format + if return_coco_annotation: + segment_ids = torch.unique(segmentation) + + run_length_encodings = [] + for idx in segment_ids: + mask = torch.where(segmentation == idx, 1, 0) + rle = binary_mask_to_rle(mask) + run_length_encodings.append(rle) + + segmentation = run_length_encodings + + results.append({"segmentation": segmentation, "segment_ids": segments}) + return results + + def post_process_panoptic_segmentation( + self, + outputs, + threshold: float = 0.5, + overlap_mask_area_threshold: float = 0.8, + label_ids_to_fuse: Optional[Set[int]] = None, + target_sizes: List[Tuple] = None, + ) -> List[Dict]: + """ + Args: + Converts the output of [`DetrForSegmentation`] into image panoptic segmentation predictions. Only supports + PyTorch. + outputs ([`DetrForSegmentation`]): + The outputs from [`DetrForSegmentation`]. + threshold (`float`, *optional*, defaults to 0.5): + The probability score threshold to keep predicted instance masks. + overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): + The overlap mask area threshold to merge or discard small disconnected parts within each binary + instance mask. + label_ids_to_fuse (`Set[int]`, *optional*, defaults to `None`): + The labels in this state will have all their instances be fused together. For instance we could say + there can only be one sky in an image, but several persons, so the label ID for sky would be in that + set, but not the one for person. + target_sizes (`List[Tuple]`, *optional*): + List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested + final size (height, width) of each prediction in batch. If left to None, predictions will not be + resized. + Returns: + `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: + - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`. If + `target_sizes` is specified, segmentation is resized to the corresponding `target_sizes` entry. + - **segment_ids** -- A dictionary that maps segment ids to semantic class ids. + - **id** -- An integer representing the `segment_id`. + - **label_id** -- An integer representing the segment's label / semantic class id. + - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. + Multiple instances of the same class / label were fused and assigned a single `segment_id`. + """ + + if label_ids_to_fuse is None: + warnings.warn("`label_ids_to_fuse` unset. No instance will be fused.") + label_ids_to_fuse = set() + + class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] + masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] + + batch_size = class_queries_logits.shape[0] + num_labels = class_queries_logits.shape[-1] - 1 + + mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] + + # Predicted label and score of each query (batch_size, num_queries) + pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) + + # Loop over items in batch size + results: List[Dict[str, TensorType]] = [] + + for i in range(batch_size): + mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( + mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels + ) + + height, width = target_sizes[i][0], target_sizes[i][1] + segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs_item.device) + segments: List[Dict] = [] + + object_detected = mask_probs_item.shape[0] > 0 + + if object_detected: + # Resize mask to corresponding target_size + if target_sizes is not None: + mask_probs_item = torch.nn.functional.interpolate( + mask_probs_item.unsqueeze(0), + size=target_sizes[i], + mode="bilinear", + align_corners=False, + )[0] + + current_segment_id = 0 + + # Weigh each mask by its prediction score + mask_probs_item *= pred_scores_item.view(-1, 1, 1) + mask_labels_item = mask_probs_item.argmax(0) # [height, width] + + # Keep track of instances of each class + stuff_memory_list: Dict[str, int] = {} + for k in range(pred_labels_item.shape[0]): + pred_class = pred_labels_item[k].item() + should_fuse = pred_class in label_ids_to_fuse + + # Get the mask associated with the k class + mask_k = mask_labels_item == k + mask_k_area = mask_k.sum() + + # Compute the area of all the stuff in query k + original_area = (mask_probs_item[k] >= 0.5).sum() + mask_exists = mask_k_area > 0 and original_area > 0 + + if mask_exists: + # Eliminate disconnected tiny segments + area_ratio = mask_k_area / original_area + if not area_ratio.item() > overlap_mask_area_threshold: + continue + + # Add corresponding class id + if pred_class in stuff_memory_list: + current_segment_id = stuff_memory_list[pred_class] + else: + current_segment_id += 1 + + # Add current object segment to final segmentation map + segmentation[mask_k] = current_segment_id + segments.append( + { + "id": current_segment_id, + "label_id": pred_class, + "was_fused": should_fuse, + } + ) + if should_fuse: + stuff_memory_list[pred_class] = current_segment_id + else: + segmentation -= 1 + + results.append({"segmentation": segmentation, "segment_ids": segments}) + return results diff --git a/src/transformers/models/detr/modeling_detr.py b/src/transformers/models/detr/modeling_detr.py index 98f5f72f7e4353..dc5b562626588c 100644 --- a/src/transformers/models/detr/modeling_detr.py +++ b/src/transformers/models/detr/modeling_detr.py @@ -148,8 +148,8 @@ class DetrObjectDetectionOutput(ModelOutput): pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding - possible padding). You can use [`~DetrFeatureExtractor.post_process`] to retrieve the unnormalized bounding - boxes. + possible padding). You can use [`~DetrFeatureExtractor.post_process_object_detection`] to retrieve the + unnormalized bounding boxes. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxilary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and @@ -211,12 +211,14 @@ class DetrSegmentationOutput(ModelOutput): pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding - possible padding). You can use [`~DetrFeatureExtractor.post_process`] to retrieve the unnormalized bounding - boxes. + possible padding). You can use [`~DetrFeatureExtractor.post_process_object_detection`] to retrieve the + unnormalized bounding boxes. pred_masks (`torch.FloatTensor` of shape `(batch_size, num_queries, height/4, width/4)`): - Segmentation masks logits for all queries. See also [`~DetrFeatureExtractor.post_process_segmentation`] or - [`~DetrFeatureExtractor.post_process_panoptic`] to evaluate instance and panoptic segmentation masks - respectively. + Segmentation masks logits for all queries. See also + [`~DetrFeatureExtractor.post_process_semantic_segmentation`] or + [`~DetrFeatureExtractor.post_process_instance_segmentation`] + [`~DetrFeatureExtractor.post_process_panoptic_segmentation`] to evaluate semantic, instance and panoptic + segmentation masks respectively. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and @@ -1424,7 +1426,7 @@ def forward( >>> # convert outputs (bounding boxes and class logits) to COCO API >>> target_sizes = torch.tensor([image.size[::-1]]) - >>> results = feature_extractor.post_process(outputs, target_sizes=target_sizes)[0] + >>> results = feature_extractor.post_process_object_detection(outputs, target_sizes=target_sizes)[0] >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): ... box = [round(i, 2) for i in box.tolist()] @@ -1601,17 +1603,14 @@ def forward( >>> # forward pass >>> outputs = model(**inputs) - >>> # use the `post_process_panoptic` method of `DetrFeatureExtractor` to convert to COCO format - >>> processed_sizes = torch.as_tensor(inputs["pixel_values"].shape[-2:]).unsqueeze(0) - >>> result = feature_extractor.post_process_panoptic(outputs, processed_sizes)[0] - - >>> # the segmentation is stored in a special-format png - >>> panoptic_seg = Image.open(io.BytesIO(result["png_string"])) - >>> panoptic_seg = numpy.array(panoptic_seg, dtype=numpy.uint8) - >>> # retrieve the ids corresponding to each mask - >>> panoptic_seg_id = rgb_to_id(panoptic_seg) - >>> panoptic_seg_id.shape - (800, 1066) + >>> # Use the `post_process_panoptic_segmentation` method of `DetrFeatureExtractor` to retrieve post-processed panoptic segmentation maps + >>> # Segmentation results are returned as a list of dictionaries + >>> result = feature_extractor.post_process_panoptic_segmentation(outputs, processed_sizes) + + >>> # A tensor of shape (height, width) where each value denotes a segment id + >>> panoptic_seg = result[0]["segmentation"] + >>> # Get mapping of segment ids to semantic class ids + >>> panoptic_segments_info = result[0]["segment_ids"] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict diff --git a/src/transformers/models/yolos/feature_extraction_yolos.py b/src/transformers/models/yolos/feature_extraction_yolos.py index 616d8e4849dc4d..7239986d587a84 100644 --- a/src/transformers/models/yolos/feature_extraction_yolos.py +++ b/src/transformers/models/yolos/feature_extraction_yolos.py @@ -16,6 +16,7 @@ import io import pathlib +import warnings from collections import defaultdict from typing import Dict, List, Optional, Union @@ -674,6 +675,12 @@ def post_process(self, outputs, target_sizes): `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. """ + warnings.warn( + "`post_process` is deprecated and will be removed in v5 of Transformers, please use" + " `post_process_object_detection`", + FutureWarning, + ) + out_logits, out_bbox = outputs.logits, outputs.pred_boxes if len(out_logits) != len(target_sizes): @@ -692,7 +699,6 @@ def post_process(self, outputs, target_sizes): boxes = boxes * scale_fct[:, None, :] results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)] - return results # Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.post_process_segmentation @@ -714,6 +720,11 @@ def post_process_segmentation(self, outputs, target_sizes, threshold=0.9, mask_t `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, and masks for an image in the batch as predicted by the model. """ + warnings.warn( + "`post_process_segmentation` is deprecated and will be removed in v5 of Transformers, please use" + " `post_process_semantic_segmentation`.", + FutureWarning, + ) out_logits, raw_masks = outputs.logits, outputs.pred_masks preds = [] @@ -762,6 +773,11 @@ def post_process_instance(self, results, outputs, orig_target_sizes, max_target_ `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels, boxes and masks for an image in the batch as predicted by the model. """ + warnings.warn( + "`post_process_instance` is deprecated and will be removed in v5 of Transformers, please use" + " `post_process_instance_segmentation`.", + FutureWarning, + ) if len(orig_target_sizes) != len(max_target_sizes): raise ValueError("Make sure to pass in as many orig_target_sizes as max_target_sizes") @@ -805,6 +821,11 @@ def post_process_panoptic(self, outputs, processed_sizes, target_sizes=None, is_ `List[Dict]`: A list of dictionaries, each dictionary containing a PNG string and segments_info values for an image in the batch as predicted by the model. """ + warnings.warn( + "`post_process_panoptic is deprecated and will be removed in v5 of Transformers, please use" + " `post_process_panoptic_segmentation`.", + FutureWarning, + ) if target_sizes is None: target_sizes = processed_sizes if len(processed_sizes) != len(target_sizes): From cca6e6fea1f29190bd3d2e904a4c8934be29ff63 Mon Sep 17 00:00:00 2001 From: Matt Date: Thu, 29 Sep 2022 16:51:08 +0100 Subject: [PATCH 419/539] Cast TF generate() inputs (#19232) * Just stick a couple of casts into generate() * Cast decoder_input_ids too * Don't accidentally cast floats * Move to _generate() * Move to after input validation Co-authored-by: Your Name --- src/transformers/generation_tf_utils.py | 50 ++++++++++++++++++------- 1 file changed, 37 insertions(+), 13 deletions(-) diff --git a/src/transformers/generation_tf_utils.py b/src/transformers/generation_tf_utils.py index 4d39a81b7b7236..beff677136aa02 100644 --- a/src/transformers/generation_tf_utils.py +++ b/src/transformers/generation_tf_utils.py @@ -1533,11 +1533,35 @@ def _generate( # generate sequences without allowing bad_words to be generated outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) ```""" + # 0. Validate the `.generate()` call self._validate_model_class() self._validate_model_kwargs(model_kwargs.copy()) - # 1. Set generation parameters if not already defined + # 1. Cast input dtypes to tf.int32 unless they're floats (which happens for some image models) + if input_ids is not None: + if isinstance(input_ids, tf.Tensor) and input_ids.dtype.is_floating: + pass + elif isinstance(input_ids, np.ndarray) and np.issubdtype(input_ids.dtype, np.floating): + pass + else: + input_ids = tf.cast(input_ids, tf.int32) + if attention_mask is not None: + attention_mask = tf.cast(attention_mask, tf.int32) + if "decoder_input_ids" in model_kwargs: + if ( + isinstance(model_kwargs["decoder_input_ids"], tf.Tensor) + and model_kwargs["decoder_input_ids"].dtype.is_floating + ): + pass + elif isinstance(model_kwargs["decoder_input_ids"], np.ndarray) and np.issubdtype( + model_kwargs["decoder_input_ids"].dtype, np.floating + ): + pass + else: + model_kwargs["decoder_input_ids"] = tf.cast(model_kwargs["decoder_input_ids"], tf.int32) + + # 2. Set generation parameters if not already defined length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping @@ -1582,12 +1606,12 @@ def _generate( "The selected model does not support Graph mode nor XLA generation (e.g. from tf.function())" ) - # 2. Define model inputs + # 3. Define model inputs input_ids = self._prepare_model_inputs(input_ids, bos_token_id) # inputs_ids now has to be defined and cannot be None anymore batch_size = shape_list(input_ids)[0] - # 3. Prepare other model kwargs + # 4. Prepare other model kwargs if output_attentions is not None: model_kwargs["output_attentions"] = output_attentions if output_hidden_states is not None: @@ -1613,7 +1637,7 @@ def _generate( "generation results, please set `padding_side='left'` when initializing the tokenizer." ) - # 4. Prepare model inputs which will be used for auto-regressive generation + # 5. Prepare model inputs which will be used for auto-regressive generation if self.config.is_encoder_decoder: # if encoder-decoder, we create encoder_outputs and add to `model_kwargs` model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(input_ids, model_kwargs) @@ -1625,7 +1649,7 @@ def _generate( model_kwargs=model_kwargs, ) - # 5. Prepare `max_length` depending on other stopping criteria. + # 6. Prepare `max_length` depending on other stopping criteria. input_ids_seq_length = input_ids.shape[-1] if max_length is None and max_new_tokens is None: warnings.warn( @@ -1661,13 +1685,13 @@ def _generate( "`max_new_tokens`." ) - # 6. determine generation mode + # 7. determine generation mode # TODO(Matt, Joao, Patrick) - add more use cases here is_greedy_gen_mode = (num_beams == 1) and do_sample is False is_sample_gen_mode = (num_beams == 1) and do_sample is True is_beam_gen_mode = (num_beams > 1) and do_sample is False - # 7. prepare distribution pre_processing samplers + # 8. prepare distribution pre_processing samplers logits_processor = self._get_logits_processor( repetition_penalty=repetition_penalty, no_repeat_ngram_size=no_repeat_ngram_size, @@ -1679,7 +1703,7 @@ def _generate( forced_eos_token_id=forced_eos_token_id, ) - # 8. go into different generation modes + # 9. go into different generation modes if is_greedy_gen_mode: if num_return_sequences > 1: raise ValueError( @@ -1697,10 +1721,10 @@ def _generate( **model_kwargs, ) elif is_sample_gen_mode: - # 9. prepare logits warper + # 10. prepare logits warper logits_warper = self._get_logits_warper(top_k=top_k, top_p=top_p, temperature=temperature) - # 10. expand input_ids with `num_return_sequences` additional sequences per batch + # 11. expand input_ids with `num_return_sequences` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_return_sequences, @@ -1708,7 +1732,7 @@ def _generate( **model_kwargs, ) - # 11. run sample + # 12. run sample return self.sample( input_ids, logits_processor=logits_processor, @@ -1729,7 +1753,7 @@ def _generate( f"num_beams >= num_return_sequences, got {num_beams} and {num_return_sequences} (respectivelly)" ) - # 9. broadcast inputs to the desired number of beams + # 10. broadcast inputs to the desired number of beams input_ids = self._expand_to_num_beams(input_ids, num_beams=num_beams) if "encoder_outputs" in model_kwargs: @@ -1742,7 +1766,7 @@ def _generate( model_kwargs["attention_mask"], num_beams=num_beams ) - # 10. run beam search + # 11. run beam search return self.beam_search( input_ids, max_length=max_length, From f16bbf1475cfb84124fec36b0f2188538e90d26a Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Thu, 29 Sep 2022 12:25:15 -0400 Subject: [PATCH 420/539] Skip pipeline tests (#19248) --- tests/pipelines/test_pipelines_image_segmentation.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/pipelines/test_pipelines_image_segmentation.py b/tests/pipelines/test_pipelines_image_segmentation.py index 611e9d2ed11d60..f35c6640c587e7 100644 --- a/tests/pipelines/test_pipelines_image_segmentation.py +++ b/tests/pipelines/test_pipelines_image_segmentation.py @@ -74,6 +74,9 @@ class ImageSegmentationPipelineTests(unittest.TestCase, metaclass=PipelineTestCa } def get_test_pipeline(self, model, tokenizer, feature_extractor): + # Fix me Alara + if model.__class__.__name__ == "DetrForSegmentation": + return None, None image_segmenter = ImageSegmentationPipeline(model=model, feature_extractor=feature_extractor) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", @@ -147,6 +150,7 @@ def test_small_model_tf(self): pass @require_torch + @unittest.skip("Fix me Alara!") def test_small_model_pt(self): model_id = "hf-internal-testing/tiny-detr-mobilenetsv3-panoptic" From 163cd15279c07f8fe2968afa2e2f27ef56ee1f83 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Thu, 29 Sep 2022 19:18:24 +0200 Subject: [PATCH 421/539] Add job names in Past CI artifacts (#19235) Co-authored-by: ydshieh --- .github/workflows/self-past.yml | 36 +++++++++++++++++++++++++++------ 1 file changed, 30 insertions(+), 6 deletions(-) diff --git a/.github/workflows/self-past.yml b/.github/workflows/self-past.yml index 71814cf3f32ba3..17ff5b9cca3183 100644 --- a/.github/workflows/self-past.yml +++ b/.github/workflows/self-past.yml @@ -15,6 +15,11 @@ on: version: required: true type: string + # Use this to control the commit to test against + sha: + default: 'main' + required: false + type: string env: HF_HOME: /mnt/cache @@ -67,18 +72,19 @@ jobs: outputs: matrix: ${{ steps.set-matrix.outputs.matrix }} steps: - - name: Checkout transformers - uses: actions/checkout@v2 - with: - fetch-depth: 2 + - name: Update clone + working-directory: /transformers + run: git fetch && git checkout ${{ inputs.sha }} - name: Cleanup + working-directory: /transformers run: | rm -rf tests/__pycache__ rm -rf tests/models/__pycache__ rm -rf reports - id: set-matrix + working-directory: /transformers name: Identify models to test run: | cd tests @@ -99,7 +105,7 @@ jobs: steps: - name: Update clone working-directory: /transformers - run: git fetch && git checkout ${{ github.sha }} + run: git fetch && git checkout ${{ inputs.sha }} - name: Echo folder ${{ matrix.folders }} shell: bash @@ -130,6 +136,15 @@ jobs: continue-on-error: true run: cat /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}/failures_short.txt + - name: Save job name + if: ${{ always() }} + shell: bash + run: | + matrix_folders=${matrix_folders/'models_'/'models/'} + job_name="Model tests ($matrix_folders, ${{ matrix.machine_type }})" + echo "$job_name" + echo "$job_name" > /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}/job_name.txt + - name: Test suite reports artifacts if: ${{ always() }} uses: actions/upload-artifact@v2 @@ -152,7 +167,7 @@ jobs: steps: - name: Update clone working-directory: /transformers - run: git fetch && git checkout ${{ github.sha }} + run: git fetch && git checkout ${{ inputs.sha }} - name: Echo folder ${{ matrix.folders }} shell: bash @@ -183,6 +198,15 @@ jobs: continue-on-error: true run: cat /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}/failures_short.txt + - name: Save job name + if: ${{ always() }} + shell: bash + run: | + matrix_folders=${matrix_folders/'models_'/'models/'} + job_name="Model tests ($matrix_folders, ${{ matrix.machine_type }})" + echo "$job_name" + echo "$job_name" > /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}/job_name.txt + - name: Test suite reports artifacts if: ${{ always() }} uses: actions/upload-artifact@v2 From 1a1893e5d820ea811cc329326595e7fba108e1ea Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Thu, 29 Sep 2022 19:22:23 +0200 Subject: [PATCH 422/539] Update Past CI report script (#19228) * Simplify the error report * Add status placeholder * Add job links Co-authored-by: ydshieh --- utils/get_ci_error_statistics.py | 81 +++++++++++++++++++++++--------- 1 file changed, 59 insertions(+), 22 deletions(-) diff --git a/utils/get_ci_error_statistics.py b/utils/get_ci_error_statistics.py index 9d0beeaaca37b6..790ec5e3d565c3 100644 --- a/utils/get_ci_error_statistics.py +++ b/utils/get_ci_error_statistics.py @@ -10,6 +10,28 @@ import requests +def get_job_links(workflow_run_id): + """Extract job names and their job links in a GitHub Actions workflow run""" + + url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100" + result = requests.get(url).json() + job_links = {} + + try: + job_links.update({job["name"]: job["html_url"] for job in result["jobs"]}) + pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100) + + for i in range(pages_to_iterate_over): + result = requests.get(url + f"&page={i + 2}").json() + job_links.update({job["name"]: job["html_url"] for job in result["jobs"]}) + + return job_links + except Exception as e: + print("Unknown error, could not fetch links.", e) + + return {} + + def get_artifacts_links(worflow_run_id): """Get all artifact links from a workflow run""" @@ -54,16 +76,17 @@ def download_artifact(artifact_name, artifact_url, output_dir, token): break -def get_errors_from_single_artifact(artifact_zip_path): +def get_errors_from_single_artifact(artifact_zip_path, job_links=None): """Extract errors from a downloaded artifact (in .zip format)""" errors = [] failed_tests = [] + job_name = None with zipfile.ZipFile(artifact_zip_path) as z: for filename in z.namelist(): if not os.path.isdir(filename): # read the file - if filename in ["failures_line.txt", "summary_short.txt"]: + if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(filename) as f: for line in f: line = line.decode("UTF-8").strip() @@ -80,6 +103,8 @@ def get_errors_from_single_artifact(artifact_zip_path): # `test` is the test method that failed test = line[len("FAILED ") :] failed_tests.append(test) + elif filename == "job_name.txt": + job_name = line if len(errors) != len(failed_tests): raise ValueError( @@ -88,23 +113,26 @@ def get_errors_from_single_artifact(artifact_zip_path): " problem." ) - return errors, failed_tests + job_link = None + if job_name and job_links: + job_link = job_links.get(job_name, None) + + # A list with elements of the form (line of error, error, failed test) + result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)] + + return result -def get_all_errors(artifact_dir): +def get_all_errors(artifact_dir, job_links=None): """Extract errors from all artifact files""" errors = [] - failed_tests = [] paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")] - for p in paths: - _errors, _failed_tests = get_errors_from_single_artifact(p) - errors.extend(_errors) - failed_tests.extend(_failed_tests) + errors.extend(get_errors_from_single_artifact(p, job_links=job_links)) - return errors, failed_tests + return errors def reduce_by_error(logs, error_filter=None): @@ -156,12 +184,12 @@ def reduce_by_model(logs, error_filter=None): def make_github_table(reduced_by_error): - header = "| no. | error |" - sep = "|-:|:-|" + header = "| no. | error | status |" + sep = "|-:|:-|:-|" lines = [header, sep] for error in reduced_by_error: count = reduced_by_error[error]["count"] - line = f"| {count} | {error[:100]} |" + line = f"| {count} | {error[:100]} | |" lines.append(line) return "\n".join(lines) @@ -201,6 +229,20 @@ def make_github_table_per_model(reduced_by_model): os.makedirs(args.output_dir, exist_ok=True) + _job_links = get_job_links(args.workflow_run_id) + job_links = {} + # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. + # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. + if _job_links: + for k, v in _job_links.items(): + # This is how GitHub actions combine job names. + if " / " in k: + index = k.find(" / ") + k = k[index + len(" / ") :] + job_links[k] = v + with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp: + json.dump(job_links, fp, ensure_ascii=False, indent=4) + artifacts = get_artifacts_links(args.workflow_run_id) with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) @@ -210,8 +252,9 @@ def make_github_table_per_model(reduced_by_model): # Be gentle to GitHub time.sleep(1) - errors, failed_tests = get_all_errors(args.output_dir) + errors = get_all_errors(args.output_dir, job_links=job_links) + # `e[1]` is the error counter = Counter() counter.update([e[1] for e in errors]) @@ -223,14 +266,8 @@ def make_github_table_per_model(reduced_by_model): with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) - with open(os.path.join(args.output_dir, "failed_tests.json"), "w", encoding="UTF-8") as fp: - json.dump(failed_tests, fp, ensure_ascii=False, indent=4) - - # Produce tables for GitHub issue. - logs = [(error_line, error, failed_test) for (error_line, error), failed_test in zip(errors, failed_tests)] - - reduced_by_error = reduce_by_error(logs) - reduced_by_model = reduce_by_model(logs) + reduced_by_error = reduce_by_error(errors) + reduced_by_model = reduce_by_model(errors) s1 = make_github_table(reduced_by_error) s2 = make_github_table_per_model(reduced_by_model) From 49d62b01783416a89acc0b865f7cb8dbab87cd6b Mon Sep 17 00:00:00 2001 From: rbsteinm Date: Thu, 29 Sep 2022 19:23:14 +0200 Subject: [PATCH 423/539] [Wav2Vec2] Fix None loss in doc examples (#19218) * pass sampled_negative_indices parameter to the model to avoid getting a None loss * concerns doc examples for Wav2Vec2ForPreTraining and Wav2Vec2ConformerForPreTraining --- .../models/wav2vec2/modeling_wav2vec2.py | 22 ++++++++++++---- .../modeling_wav2vec2_conformer.py | 25 +++++++++++++++---- 2 files changed, 37 insertions(+), 10 deletions(-) diff --git a/src/transformers/models/wav2vec2/modeling_wav2vec2.py b/src/transformers/models/wav2vec2/modeling_wav2vec2.py index 7feb7790dc395d..86eb7b8dc5aa21 100755 --- a/src/transformers/models/wav2vec2/modeling_wav2vec2.py +++ b/src/transformers/models/wav2vec2/modeling_wav2vec2.py @@ -1421,7 +1421,7 @@ def forward( ```python >>> import torch >>> from transformers import AutoFeatureExtractor, Wav2Vec2ForPreTraining - >>> from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices + >>> from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices, _sample_negative_indices >>> from datasets import load_dataset >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base") @@ -1432,9 +1432,19 @@ def forward( >>> # compute masked indices >>> batch_size, raw_sequence_length = input_values.shape - >>> sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length) - >>> mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.2, mask_length=2) - >>> mask_time_indices = torch.tensor(mask_time_indices, device=input_values.device, dtype=torch.long) + >>> sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length).item() + >>> mask_time_indices = _compute_mask_indices( + ... shape=(batch_size, sequence_length), mask_prob=0.2, mask_length=2 + ... ) + >>> sampled_negative_indices = _sample_negative_indices( + ... features_shape=(batch_size, sequence_length), + ... num_negatives=model.config.num_negatives, + ... mask_time_indices=mask_time_indices, + ... ) + >>> mask_time_indices = torch.tensor(data=mask_time_indices, device=input_values.device, dtype=torch.long) + >>> sampled_negative_indices = torch.tensor( + ... data=sampled_negative_indices, device=input_values.device, dtype=torch.long + ... ) >>> with torch.no_grad(): ... outputs = model(input_values, mask_time_indices=mask_time_indices) @@ -1448,7 +1458,9 @@ def forward( >>> # for contrastive loss training model should be put into train mode >>> model = model.train() - >>> loss = model(input_values, mask_time_indices=mask_time_indices).loss + >>> loss = model( + ... input_values, mask_time_indices=mask_time_indices, sampled_negative_indices=sampled_negative_indices + ... ).loss ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict diff --git a/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py b/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py index 8723c6338d2d83..05502c9878de96 100644 --- a/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py +++ b/src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py @@ -1469,7 +1469,10 @@ def forward( ```python >>> import torch >>> from transformers import AutoFeatureExtractor, Wav2Vec2ConformerForPreTraining - >>> from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer import _compute_mask_indices + >>> from transformers.models.wav2vec2_conformer.modeling_wav2vec2_conformer import ( + ... _compute_mask_indices, + ... _sample_negative_indices, + ... ) >>> from datasets import load_dataset >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-conformer-rel-pos-large") @@ -1480,9 +1483,19 @@ def forward( >>> # compute masked indices >>> batch_size, raw_sequence_length = input_values.shape - >>> sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length) - >>> mask_time_indices = _compute_mask_indices((batch_size, sequence_length), mask_prob=0.2, mask_length=2) - >>> mask_time_indices = torch.tensor(mask_time_indices, device=input_values.device, dtype=torch.long) + >>> sequence_length = model._get_feat_extract_output_lengths(raw_sequence_length).item() + >>> mask_time_indices = _compute_mask_indices( + ... shape=(batch_size, sequence_length), mask_prob=0.2, mask_length=2 + ... ) + >>> sampled_negative_indices = _sample_negative_indices( + ... features_shape=(batch_size, sequence_length), + ... num_negatives=model.config.num_negatives, + ... mask_time_indices=mask_time_indices, + ... ) + >>> mask_time_indices = torch.tensor(data=mask_time_indices, device=input_values.device, dtype=torch.long) + >>> sampled_negative_indices = torch.tensor( + ... data=sampled_negative_indices, device=input_values.device, dtype=torch.long + ... ) >>> with torch.no_grad(): ... outputs = model(input_values, mask_time_indices=mask_time_indices) @@ -1496,7 +1509,9 @@ def forward( >>> # for contrastive loss training model should be put into train mode >>> model = model.train() - >>> loss = model(input_values, mask_time_indices=mask_time_indices).loss + >>> loss = model( + ... input_values, mask_time_indices=mask_time_indices, sampled_negative_indices=sampled_negative_indices + ... ).loss ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict From f3d2f7a6e08efe18debf59512325f02128394b43 Mon Sep 17 00:00:00 2001 From: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Date: Fri, 30 Sep 2022 08:25:43 +0200 Subject: [PATCH 424/539] Add MarkupLM (#19198) * First draft * Make basic test work * Fix most tokenizer tests * More improvements * Make more tests pass * Fix more tests * Fix some code quality * Improve truncation * Implement feature extractor * Improve feature extractor and add tests * Improve feature extractor tests * Fix pair_input test partly * Add fast tokenizer * Improve implementation * Fix rebase * Fix rebase * Fix most of the tokenizer tests. * propose solution for fast * add: integration test for fasttokenizer, warning for decode, fix template in slow tokenizer * add: modify markuplmconverter * add: some modify on converter and tokenizerfast * Fix style, copies * Make fixup * Update tokenization_markuplm.py * Update test_tokenization_markuplm.py * Update markuplm related * Improve processor, add integration test * Add processor test file * Improve processor * Improve processor tests * Fix more processor tests * Fix processor tests * Update docstrings * Add Copied from statements * Add more Copied from statements * Add code examples * Improve code examples * Add model to doc tests * Adding dependency check * Add dummy file * Add requires_backends * Add model to toctree * Fix more things, disable dependency check for now * Apply more suggestions * Add soft dependency * Add annotators to tests * Fix style * Remove from_slow=True * Remove print statements * Add sanity check * Fix processor test * Fix processor tests, add more docs * Add doc tests for mdx file * Add more tips * Apply suggestions Co-authored-by: Niels Rogge Co-authored-by: lockon-n <45759388+lockon-n@users.noreply.github.com> Co-authored-by: SaulLu Co-authored-by: lockon-n --- README.md | 1 + README_ko.md | 1 + README_zh-hans.md | 3 +- README_zh-hant.md | 1 + docs/source/en/_toctree.yml | 2 + docs/source/en/index.mdx | 2 + docs/source/en/model_doc/markuplm.mdx | 246 ++ src/transformers/__init__.py | 34 + src/transformers/convert_slow_tokenizer.py | 39 + src/transformers/file_utils.py | 1 + src/transformers/models/__init__.py | 1 + .../models/auto/configuration_auto.py | 3 + src/transformers/models/auto/modeling_auto.py | 5 + .../models/auto/processing_auto.py | 1 + src/transformers/models/markuplm/__init__.py | 88 + .../models/markuplm/configuration_markuplm.py | 151 ++ .../markuplm/feature_extraction_markuplm.py | 183 ++ .../models/markuplm/modeling_markuplm.py | 1300 ++++++++++ .../models/markuplm/processing_markuplm.py | 140 + .../models/markuplm/tokenization_markuplm.py | 1461 +++++++++++ .../markuplm/tokenization_markuplm_fast.py | 924 +++++++ src/transformers/testing_utils.py | 8 + src/transformers/utils/__init__.py | 1 + src/transformers/utils/dummy_pt_objects.py | 38 + .../utils/dummy_tokenizers_objects.py | 7 + src/transformers/utils/import_utils.py | 11 + tests/models/markuplm/__init__.py | 0 .../test_feature_extraction_markuplm.py | 114 + .../models/markuplm/test_modeling_markuplm.py | 364 +++ .../markuplm/test_processor_markuplm.py | 451 ++++ .../markuplm/test_tokenization_markuplm.py | 2306 +++++++++++++++++ utils/documentation_tests.txt | 2 + 32 files changed, 7888 insertions(+), 1 deletion(-) create mode 100644 docs/source/en/model_doc/markuplm.mdx create mode 100644 src/transformers/models/markuplm/__init__.py create mode 100644 src/transformers/models/markuplm/configuration_markuplm.py create mode 100644 src/transformers/models/markuplm/feature_extraction_markuplm.py create mode 100755 src/transformers/models/markuplm/modeling_markuplm.py create mode 100644 src/transformers/models/markuplm/processing_markuplm.py create mode 100644 src/transformers/models/markuplm/tokenization_markuplm.py create mode 100644 src/transformers/models/markuplm/tokenization_markuplm_fast.py create mode 100644 tests/models/markuplm/__init__.py create mode 100644 tests/models/markuplm/test_feature_extraction_markuplm.py create mode 100644 tests/models/markuplm/test_modeling_markuplm.py create mode 100644 tests/models/markuplm/test_processor_markuplm.py create mode 100644 tests/models/markuplm/test_tokenization_markuplm.py diff --git a/README.md b/README.md index 5e17e33b204cc1..ea026159803b7d 100644 --- a/README.md +++ b/README.md @@ -328,6 +328,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert. 1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin. 1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team. +1. **[MarkupLM](https://huggingface.co/docs/transformers/main/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei. 1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov. 1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer. 1. **[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan. diff --git a/README_ko.md b/README_ko.md index f53075ff5fe6f9..e7a0d9d2960470 100644 --- a/README_ko.md +++ b/README_ko.md @@ -278,6 +278,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert. 1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin. 1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team. +1. **[MarkupLM](https://huggingface.co/docs/transformers/main/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei. 1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov. 1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer. 1. **[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan. diff --git a/README_zh-hans.md b/README_zh-hans.md index 2843a8eb29a08c..f3f1a5474c833c 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -302,7 +302,8 @@ conda install -c huggingface transformers 1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (来自 Facebook) 伴随论文 [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) 由 Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert 发布。 1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (来自 Facebook) 伴随论文 [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) 由 Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin 发布。 1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** 用 [OPUS](http://opus.nlpl.eu/) 数据训练的机器翻译模型由 Jörg Tiedemann 发布。[Marian Framework](https://marian-nmt.github.io/) 由微软翻译团队开发。 -1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov +1. **[MarkupLM](https://huggingface.co/docs/transformers/main/model_doc/markuplm)** (来自 Microsoft Research Asia) 伴随论文 [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) 由 Junlong Li, Yiheng Xu, Lei Cui, Furu Wei 发布。 +1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov >>>>>>> Fix rebase 1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (来自 Facebook) 伴随论文 [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) 由 Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer 发布。 1. **[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (来自 Facebook) 伴随论文 [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) 由 Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan 发布。 1. **[Megatron-BERT](https://huggingface.co/docs/transformers/model_doc/megatron-bert)** (来自 NVIDIA) 伴随论文 [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) 由 Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index 8f74b97e98549f..43e8a05372cd07 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -314,6 +314,7 @@ conda install -c huggingface transformers 1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert. 1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin. 1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team. +1. **[MarkupLM](https://huggingface.co/docs/transformers/main/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei. 1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov 1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer. 1. **[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan. diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 5e2d25ee3c4d11..644778e155c978 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -279,6 +279,8 @@ title: M2M100 - local: model_doc/marian title: MarianMT + - local: model_doc/markuplm + title: MarkupLM - local: model_doc/mbart title: MBart and MBart-50 - local: model_doc/megatron-bert diff --git a/docs/source/en/index.mdx b/docs/source/en/index.mdx index 98a458e11fff41..652c5bc77b8ee9 100644 --- a/docs/source/en/index.mdx +++ b/docs/source/en/index.mdx @@ -118,6 +118,7 @@ The documentation is organized into five sections: 1. **[M-CTC-T](model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert. 1. **[M2M100](model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin. 1. **[MarianMT](model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team. +1. **[MarkupLM](model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei. 1. **[MaskFormer](model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov. 1. **[mBART](model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer. 1. **[mBART-50](model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan. @@ -264,6 +265,7 @@ Flax), PyTorch, and/or TensorFlow. | M-CTC-T | ❌ | ❌ | ✅ | ❌ | ❌ | | M2M100 | ✅ | ❌ | ✅ | ❌ | ❌ | | Marian | ✅ | ❌ | ✅ | ✅ | ✅ | +| MarkupLM | ✅ | ✅ | ✅ | ❌ | ❌ | | MaskFormer | ❌ | ❌ | ✅ | ❌ | ❌ | | mBART | ✅ | ✅ | ✅ | ✅ | ✅ | | Megatron-BERT | ❌ | ❌ | ✅ | ❌ | ❌ | diff --git a/docs/source/en/model_doc/markuplm.mdx b/docs/source/en/model_doc/markuplm.mdx new file mode 100644 index 00000000000000..66ba7a8180d677 --- /dev/null +++ b/docs/source/en/model_doc/markuplm.mdx @@ -0,0 +1,246 @@ + + +# MarkupLM + +## Overview + +The MarkupLM model was proposed in [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document +Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei. MarkupLM is BERT, but +applied to HTML pages instead of raw text documents. The model incorporates additional embedding layers to improve +performance, similar to [LayoutLM](layoutlm). + +The model can be used for tasks like question answering on web pages or information extraction from web pages. It obtains +state-of-the-art results on 2 important benchmarks: +- [WebSRC](https://x-lance.github.io/WebSRC/), a dataset for Web-Based Structual Reading Comprehension (a bit like SQuAD but for web pages) +- [SWDE](https://www.researchgate.net/publication/221299838_From_one_tree_to_a_forest_a_unified_solution_for_structured_web_data_extraction), a dataset +for information extraction from web pages (basically named-entity recogntion on web pages) + +The abstract from the paper is the following: + +*Multimodal pre-training with text, layout, and image has made significant progress for Visually-rich Document +Understanding (VrDU), especially the fixed-layout documents such as scanned document images. While, there are still a +large number of digital documents where the layout information is not fixed and needs to be interactively and +dynamically rendered for visualization, making existing layout-based pre-training approaches not easy to apply. In this +paper, we propose MarkupLM for document understanding tasks with markup languages as the backbone such as +HTML/XML-based documents, where text and markup information is jointly pre-trained. Experiment results show that the +pre-trained MarkupLM significantly outperforms the existing strong baseline models on several document understanding +tasks. The pre-trained model and code will be publicly available.* + +Tips: +- In addition to `input_ids`, [`~MarkupLMModel.forward`] expects 2 additional inputs, namely `xpath_tags_seq` and `xpath_subs_seq`. +These are the XPATH tags and subscripts respectively for each token in the input sequence. +- One can use [`MarkupLMProcessor`] to prepare all data for the model. Refer to the [usage guide](#usage-markuplmprocessor) for more info. +- Demo notebooks can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/MarkupLM). + + + + MarkupLM architecture. Taken from the original paper. + +This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/microsoft/unilm/tree/master/markuplm). + +## Usage: MarkupLMProcessor + +The easiest way to prepare data for the model is to use [`MarkupLMProcessor`], which internally combines a feature extractor +([`MarkupLMFeatureExtractor`]) and a tokenizer ([`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`]). The feature extractor is +used to extract all nodes and xpaths from the HTML strings, which are then provided to the tokenizer, which turns them into the +token-level inputs of the model (`input_ids` etc.). Note that you can still use the feature extractor and tokenizer separately, +if you only want to handle one of the two tasks. + +```python +from transformers import MarkupLMFeatureExtractor, MarkupLMTokenizerFast, MarkupLMProcessor + +feature_extractor = MarkupLMFeatureExtractor() +tokenizer = MarkupLMTokenizerFast.from_pretrained("microsoft/markuplm-base") +processor = MarkupLMProcessor(feature_extractor, tokenizer) +``` + +In short, one can provide HTML strings (and possibly additional data) to [`MarkupLMProcessor`], +and it will create the inputs expected by the model. Internally, the processor first uses +[`MarkupLMFeatureExtractor`] to get a list of nodes and corresponding xpaths. The nodes and +xpaths are then provided to [`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`], which converts them +to token-level `input_ids`, `attention_mask`, `token_type_ids`, `xpath_subs_seq`, `xpath_tags_seq`. +Optionally, one can provide node labels to the processor, which are turned into token-level `labels`. + +[`MarkupLMFeatureExtractor`] uses [Beautiful Soup](https://www.crummy.com/software/BeautifulSoup/bs4/doc/), a Python library for +pulling data out of HTML and XML files, under the hood. Note that you can still use your own parsing solution of +choice, and provide the nodes and xpaths yourself to [`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`]. + +In total, there are 5 use cases that are supported by the processor. Below, we list them all. Note that each of these +use cases work for both batched and non-batched inputs (we illustrate them for non-batched inputs). + +**Use case 1: web page classification (training, inference) + token classification (inference), parse_html = True** + +This is the simplest case, in which the processor will use the feature extractor to get all nodes and xpaths from the HTML. + +```python +>>> from transformers import MarkupLMProcessor + +>>> processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base") + +>>> html_string = """ +... +... +... +... Hello world +... +... + +...

Welcome

+...

Here is my website.

+ +... +... """ + +>>> # note that you can also add provide all tokenizer parameters here such as padding, truncation +>>> encoding = processor(html_string, return_tensors="pt") +>>> print(encoding.keys()) +dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'xpath_subs_seq']) +``` + +**Use case 2: web page classification (training, inference) + token classification (inference), parse_html=False** + +In case one already has obtained all nodes and xpaths, one doesn't need the feature extractor. In that case, one should +provide the nodes and corresponding xpaths themselves to the processor, and make sure to set `parse_html` to `False`. + +```python +>>> from transformers import MarkupLMProcessor + +>>> processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base") +>>> processor.parse_html = False + +>>> nodes = ["hello", "world", "how", "are"] +>>> xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span", "html/body", "html/body/div"] +>>> encoding = processor(nodes=nodes, xpaths=xpaths, return_tensors="pt") +>>> print(encoding.keys()) +dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'xpath_subs_seq']) +``` + +**Use case 3: token classification (training), parse_html=False** + +For token classification tasks (such as [SWDE](https://paperswithcode.com/dataset/swde)), one can also provide the +corresponding node labels in order to train a model. The processor will then convert these into token-level `labels`. +By default, it will only label the first wordpiece of a word, and label the remaining wordpieces with -100, which is the +`ignore_index` of PyTorch's CrossEntropyLoss. In case you want all wordpieces of a word to be labeled, you can +initialize the tokenizer with `only_label_first_subword` set to `False`. + +```python +>>> from transformers import MarkupLMProcessor + +>>> processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base") +>>> processor.parse_html = False + +>>> nodes = ["hello", "world", "how", "are"] +>>> xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span", "html/body", "html/body/div"] +>>> node_labels = [1, 2, 2, 1] +>>> encoding = processor(nodes=nodes, xpaths=xpaths, node_labels=node_labels, return_tensors="pt") +>>> print(encoding.keys()) +dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'xpath_subs_seq', 'labels']) +``` + +**Use case 4: web page question answering (inference), parse_html=True** + +For question answering tasks on web pages, you can provide a question to the processor. By default, the +processor will use the feature extractor to get all nodes and xpaths, and create [CLS] question tokens [SEP] word tokens [SEP]. + +```python +>>> from transformers import MarkupLMProcessor + +>>> processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base") + +>>> html_string = """ +... +... +... +... Hello world +... +... + +...

Welcome

+...

My name is Niels.

+ +... +... """ + +>>> question = "What's his name?" +>>> encoding = processor(html_string, questions=question, return_tensors="pt") +>>> print(encoding.keys()) +dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'xpath_subs_seq']) +``` + +**Use case 5: web page question answering (inference), apply_ocr=False** + +For question answering tasks (such as WebSRC), you can provide a question to the processor. If you have extracted +all nodes and xpaths yourself, you can provide them directly to the processor. Make sure to set `parse_html` to `False`. + +```python +>>> from transformers import MarkupLMProcessor + +>>> processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base") +>>> processor.parse_html = False + +>>> nodes = ["hello", "world", "how", "are"] +>>> xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span", "html/body", "html/body/div"] +>>> question = "What's his name?" +>>> encoding = processor(nodes=nodes, xpaths=xpaths, questions=question, return_tensors="pt") +>>> print(encoding.keys()) +dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'xpath_subs_seq']) +``` + +## MarkupLMConfig + +[[autodoc]] MarkupLMConfig + - all + +## MarkupLMFeatureExtractor + +[[autodoc]] MarkupLMFeatureExtractor + - __call__ + +## MarkupLMTokenizer + +[[autodoc]] MarkupLMTokenizer + - build_inputs_with_special_tokens + - get_special_tokens_mask + - create_token_type_ids_from_sequences + - save_vocabulary + +## MarkupLMTokenizerFast + +[[autodoc]] MarkupLMTokenizerFast + - all + +## MarkupLMProcessor + +[[autodoc]] MarkupLMProcessor + - __call__ + +## MarkupLMModel + +[[autodoc]] MarkupLMModel + - forward + +## MarkupLMForSequenceClassification + +[[autodoc]] MarkupLMForSequenceClassification + - forward + +## MarkupLMForTokenClassification + +[[autodoc]] MarkupLMForTokenClassification + - forward + +## MarkupLMForQuestionAnswering + +[[autodoc]] MarkupLMForQuestionAnswering + - forward \ No newline at end of file diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index fb09d0af9f261d..6478bcd7e5b544 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -262,6 +262,13 @@ "models.lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig", "LxmertTokenizer"], "models.m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config"], "models.marian": ["MarianConfig"], + "models.markuplm": [ + "MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP", + "MarkupLMConfig", + "MarkupLMFeatureExtractor", + "MarkupLMProcessor", + "MarkupLMTokenizer", + ], "models.maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"], "models.mbart": ["MBartConfig"], "models.mbart50": [], @@ -570,6 +577,7 @@ _import_structure["models.led"].append("LEDTokenizerFast") _import_structure["models.longformer"].append("LongformerTokenizerFast") _import_structure["models.lxmert"].append("LxmertTokenizerFast") + _import_structure["models.markuplm"].append("MarkupLMTokenizerFast") _import_structure["models.mbart"].append("MBartTokenizerFast") _import_structure["models.mbart50"].append("MBart50TokenizerFast") _import_structure["models.mobilebert"].append("MobileBertTokenizerFast") @@ -1488,6 +1496,16 @@ "MaskFormerPreTrainedModel", ] ) + _import_structure["models.markuplm"].extend( + [ + "MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST", + "MarkupLMForQuestionAnswering", + "MarkupLMForSequenceClassification", + "MarkupLMForTokenClassification", + "MarkupLMModel", + "MarkupLMPreTrainedModel", + ] + ) _import_structure["models.mbart"].extend( [ "MBartForCausalLM", @@ -3192,6 +3210,13 @@ from .models.lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig, LxmertTokenizer from .models.m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config from .models.marian import MarianConfig + from .models.markuplm import ( + MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP, + MarkupLMConfig, + MarkupLMFeatureExtractor, + MarkupLMProcessor, + MarkupLMTokenizer, + ) from .models.maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig from .models.mbart import MBartConfig from .models.mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig, MCTCTProcessor @@ -3465,6 +3490,7 @@ from .models.led import LEDTokenizerFast from .models.longformer import LongformerTokenizerFast from .models.lxmert import LxmertTokenizerFast + from .models.markuplm import MarkupLMTokenizerFast from .models.mbart import MBartTokenizerFast from .models.mbart50 import MBart50TokenizerFast from .models.mobilebert import MobileBertTokenizerFast @@ -4196,6 +4222,14 @@ M2M100PreTrainedModel, ) from .models.marian import MarianForCausalLM, MarianModel, MarianMTModel + from .models.markuplm import ( + MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST, + MarkupLMForQuestionAnswering, + MarkupLMForSequenceClassification, + MarkupLMForTokenClassification, + MarkupLMModel, + MarkupLMPreTrainedModel, + ) from .models.maskformer import ( MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, diff --git a/src/transformers/convert_slow_tokenizer.py b/src/transformers/convert_slow_tokenizer.py index 6fbd7b49b066a0..ce52ba3b3beba0 100644 --- a/src/transformers/convert_slow_tokenizer.py +++ b/src/transformers/convert_slow_tokenizer.py @@ -1043,6 +1043,44 @@ def post_processor(self): ) +class MarkupLMConverter(Converter): + def converted(self) -> Tokenizer: + ot = self.original_tokenizer + vocab = ot.encoder + merges = list(ot.bpe_ranks.keys()) + + tokenizer = Tokenizer( + BPE( + vocab=vocab, + merges=merges, + dropout=None, + continuing_subword_prefix="", + end_of_word_suffix="", + fuse_unk=False, + unk_token=self.original_tokenizer.unk_token, + ) + ) + + tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space) + tokenizer.decoder = decoders.ByteLevel() + + cls = str(self.original_tokenizer.cls_token) + sep = str(self.original_tokenizer.sep_token) + cls_token_id = self.original_tokenizer.cls_token_id + sep_token_id = self.original_tokenizer.sep_token_id + + tokenizer.post_processor = processors.TemplateProcessing( + single=f"{cls} $A {sep}", + pair=f"{cls} $A {sep} $B {sep}", + special_tokens=[ + (cls, cls_token_id), + (sep, sep_token_id), + ], + ) + + return tokenizer + + SLOW_TO_FAST_CONVERTERS = { "AlbertTokenizer": AlbertConverter, "BartTokenizer": RobertaConverter, @@ -1072,6 +1110,7 @@ def post_processor(self): "LongformerTokenizer": RobertaConverter, "LEDTokenizer": RobertaConverter, "LxmertTokenizer": BertConverter, + "MarkupLMTokenizer": MarkupLMConverter, "MBartTokenizer": MBartConverter, "MBart50Tokenizer": MBart50Converter, "MPNetTokenizer": MPNetConverter, diff --git a/src/transformers/file_utils.py b/src/transformers/file_utils.py index aa3681e057bb9d..87cd9a46918723 100644 --- a/src/transformers/file_utils.py +++ b/src/transformers/file_utils.py @@ -79,6 +79,7 @@ has_file, http_user_agent, is_apex_available, + is_bs4_available, is_coloredlogs_available, is_datasets_available, is_detectron2_available, diff --git a/src/transformers/models/__init__.py b/src/transformers/models/__init__.py index 18c21cdf1865b3..261d4c03e2369f 100644 --- a/src/transformers/models/__init__.py +++ b/src/transformers/models/__init__.py @@ -88,6 +88,7 @@ lxmert, m2m_100, marian, + markuplm, maskformer, mbart, mbart50, diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index 39c48b217ff53f..781641b74edf92 100644 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -90,6 +90,7 @@ ("lxmert", "LxmertConfig"), ("m2m_100", "M2M100Config"), ("marian", "MarianConfig"), + ("markuplm", "MarkupLMConfig"), ("maskformer", "MaskFormerConfig"), ("mbart", "MBartConfig"), ("mctct", "MCTCTConfig"), @@ -221,6 +222,7 @@ ("luke", "LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("lxmert", "LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("m2m_100", "M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("markuplm", "MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("maskformer", "MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("mbart", "MBART_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("mctct", "MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP"), @@ -357,6 +359,7 @@ ("lxmert", "LXMERT"), ("m2m_100", "M2M100"), ("marian", "Marian"), + ("markuplm", "MarkupLM"), ("maskformer", "MaskFormer"), ("mbart", "mBART"), ("mbart50", "mBART-50"), diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index 936e9c8bdc479c..d703c5b22a6c0f 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -89,6 +89,7 @@ ("lxmert", "LxmertModel"), ("m2m_100", "M2M100Model"), ("marian", "MarianModel"), + ("markuplm", "MarkupLMModel"), ("maskformer", "MaskFormerModel"), ("mbart", "MBartModel"), ("mctct", "MCTCTModel"), @@ -247,6 +248,7 @@ ("luke", "LukeForMaskedLM"), ("m2m_100", "M2M100ForConditionalGeneration"), ("marian", "MarianMTModel"), + ("markuplm", "MarkupLMForMaskedLM"), ("megatron-bert", "MegatronBertForCausalLM"), ("mobilebert", "MobileBertForMaskedLM"), ("mpnet", "MPNetForMaskedLM"), @@ -530,6 +532,7 @@ ("led", "LEDForSequenceClassification"), ("longformer", "LongformerForSequenceClassification"), ("luke", "LukeForSequenceClassification"), + ("markuplm", "MarkupLMForSequenceClassification"), ("mbart", "MBartForSequenceClassification"), ("megatron-bert", "MegatronBertForSequenceClassification"), ("mobilebert", "MobileBertForSequenceClassification"), @@ -585,6 +588,7 @@ ("longformer", "LongformerForQuestionAnswering"), ("luke", "LukeForQuestionAnswering"), ("lxmert", "LxmertForQuestionAnswering"), + ("markuplm", "MarkupLMForQuestionAnswering"), ("mbart", "MBartForQuestionAnswering"), ("megatron-bert", "MegatronBertForQuestionAnswering"), ("mobilebert", "MobileBertForQuestionAnswering"), @@ -654,6 +658,7 @@ ("layoutlmv3", "LayoutLMv3ForTokenClassification"), ("longformer", "LongformerForTokenClassification"), ("luke", "LukeForTokenClassification"), + ("markuplm", "MarkupLMForTokenClassification"), ("megatron-bert", "MegatronBertForTokenClassification"), ("mobilebert", "MobileBertForTokenClassification"), ("mpnet", "MPNetForTokenClassification"), diff --git a/src/transformers/models/auto/processing_auto.py b/src/transformers/models/auto/processing_auto.py index 07b2811a16481b..9885cae95e88cb 100644 --- a/src/transformers/models/auto/processing_auto.py +++ b/src/transformers/models/auto/processing_auto.py @@ -46,6 +46,7 @@ ("layoutlmv2", "LayoutLMv2Processor"), ("layoutlmv3", "LayoutLMv3Processor"), ("layoutxlm", "LayoutXLMProcessor"), + ("markuplm", "MarkupLMProcessor"), ("owlvit", "OwlViTProcessor"), ("sew", "Wav2Vec2Processor"), ("sew-d", "Wav2Vec2Processor"), diff --git a/src/transformers/models/markuplm/__init__.py b/src/transformers/models/markuplm/__init__.py new file mode 100644 index 00000000000000..9d81b9ad369ea5 --- /dev/null +++ b/src/transformers/models/markuplm/__init__.py @@ -0,0 +1,88 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +# rely on isort to merge the imports +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available + + +_import_structure = { + "configuration_markuplm": ["MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "MarkupLMConfig"], + "feature_extraction_markuplm": ["MarkupLMFeatureExtractor"], + "processing_markuplm": ["MarkupLMProcessor"], + "tokenization_markuplm": ["MarkupLMTokenizer"], +} + +try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["tokenization_markuplm_fast"] = ["MarkupLMTokenizerFast"] + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_markuplm"] = [ + "MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST", + "MarkupLMForQuestionAnswering", + "MarkupLMForSequenceClassification", + "MarkupLMForTokenClassification", + "MarkupLMModel", + "MarkupLMPreTrainedModel", + ] + + +if TYPE_CHECKING: + from .configuration_markuplm import MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP, MarkupLMConfig + from .feature_extraction_markuplm import MarkupLMFeatureExtractor + from .processing_markuplm import MarkupLMProcessor + from .tokenization_markuplm import MarkupLMTokenizer + + try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .tokenization_markuplm_fast import MarkupLMTokenizerFast + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_markuplm import ( + MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST, + MarkupLMForQuestionAnswering, + MarkupLMForSequenceClassification, + MarkupLMForTokenClassification, + MarkupLMModel, + MarkupLMPreTrainedModel, + ) + + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) diff --git a/src/transformers/models/markuplm/configuration_markuplm.py b/src/transformers/models/markuplm/configuration_markuplm.py new file mode 100644 index 00000000000000..a7676d7db4bba2 --- /dev/null +++ b/src/transformers/models/markuplm/configuration_markuplm.py @@ -0,0 +1,151 @@ +# coding=utf-8 +# Copyright 2021, The Microsoft Research Asia MarkupLM Team authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" MarkupLM model configuration""" + +from transformers.models.roberta.configuration_roberta import RobertaConfig +from transformers.utils import logging + + +logger = logging.get_logger(__name__) + +MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json", + "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json", +} + + +class MarkupLMConfig(RobertaConfig): + r""" + This is the configuration class to store the configuration of a [`MarkupLMModel`]. It is used to instantiate a + MarkupLM model according to the specified arguments, defining the model architecture. Instantiating a configuration + with the defaults will yield a similar configuration to that of the MarkupLM + [microsoft/markuplm-base-uncased](https://huggingface.co/microsoft/markuplm-base-uncased) architecture. + + Configuration objects inherit from [`BertConfig`] and can be used to control the model outputs. Read the + documentation from [`BertConfig`] for more information. + + Args: + vocab_size (`int`, *optional*, defaults to 30522): + Vocabulary size of the MarkupLM model. Defines the different tokens that can be represented by the + *inputs_ids* passed to the forward method of [`MarkupLMModel`]. + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention probabilities. + max_position_embeddings (`int`, *optional*, defaults to 512): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + type_vocab_size (`int`, *optional*, defaults to 2): + The vocabulary size of the `token_type_ids` passed into [`MarkupLMModel`]. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the layer normalization layers. + gradient_checkpointing (`bool`, *optional*, defaults to `False`): + If True, use gradient checkpointing to save memory at the expense of slower backward pass. + max_tree_id_unit_embeddings (`int`, *optional*, defaults to 1024): + The maximum value that the tree id unit embedding might ever use. Typically set this to something large + just in case (e.g., 1024). + max_xpath_tag_unit_embeddings (`int`, *optional*, defaults to 256): + The maximum value that the xpath tag unit embedding might ever use. Typically set this to something large + just in case (e.g., 256). + max_xpath_subs_unit_embeddings (`int`, *optional*, defaults to 1024): + The maximum value that the xpath subscript unit embedding might ever use. Typically set this to something + large just in case (e.g., 1024). + tag_pad_id (`int`, *optional*, defaults to 216): + The id of the padding token in the xpath tags. + subs_pad_id (`int`, *optional*, defaults to 1001): + The id of the padding token in the xpath subscripts. + xpath_tag_unit_hidden_size (`int`, *optional*, defaults to 32): + The hidden size of each tree id unit. One complete tree index will have + (50*xpath_tag_unit_hidden_size)-dim. + max_depth (`int`, *optional*, defaults to 50): + The maximum depth in xpath. + + Examples: + + ```python + >>> from transformers import MarkupLMModel, MarkupLMConfig + + >>> # Initializing a MarkupLM microsoft/markuplm-base style configuration + >>> configuration = MarkupLMConfig() + + >>> # Initializing a model from the microsoft/markuplm-base style configuration + >>> model = MarkupLMModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + model_type = "markuplm" + + def __init__( + self, + vocab_size=30522, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=2, + initializer_range=0.02, + layer_norm_eps=1e-12, + pad_token_id=0, + gradient_checkpointing=False, + max_xpath_tag_unit_embeddings=256, + max_xpath_subs_unit_embeddings=1024, + tag_pad_id=216, + subs_pad_id=1001, + xpath_unit_hidden_size=32, + max_depth=50, + **kwargs + ): + super().__init__( + vocab_size=vocab_size, + hidden_size=hidden_size, + num_hidden_layers=num_hidden_layers, + num_attention_heads=num_attention_heads, + intermediate_size=intermediate_size, + hidden_act=hidden_act, + hidden_dropout_prob=hidden_dropout_prob, + attention_probs_dropout_prob=attention_probs_dropout_prob, + max_position_embeddings=max_position_embeddings, + type_vocab_size=type_vocab_size, + initializer_range=initializer_range, + layer_norm_eps=layer_norm_eps, + pad_token_id=pad_token_id, + gradient_checkpointing=gradient_checkpointing, + **kwargs, + ) + # additional properties + self.max_depth = max_depth + self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings + self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings + self.tag_pad_id = tag_pad_id + self.subs_pad_id = subs_pad_id + self.xpath_unit_hidden_size = xpath_unit_hidden_size diff --git a/src/transformers/models/markuplm/feature_extraction_markuplm.py b/src/transformers/models/markuplm/feature_extraction_markuplm.py new file mode 100644 index 00000000000000..b20349fafb0a57 --- /dev/null +++ b/src/transformers/models/markuplm/feature_extraction_markuplm.py @@ -0,0 +1,183 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Feature extractor class for MarkupLM. +""" + +import html + +from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin +from ...utils import is_bs4_available, logging, requires_backends + + +if is_bs4_available(): + import bs4 + from bs4 import BeautifulSoup + + +logger = logging.get_logger(__name__) + + +class MarkupLMFeatureExtractor(FeatureExtractionMixin): + r""" + Constructs a MarkupLM feature extractor. This can be used to get a list of nodes and corresponding xpaths from HTML + strings. + + This feature extractor inherits from [`~feature_extraction_utils.PreTrainedFeatureExtractor`] which contains most + of the main methods. Users should refer to this superclass for more information regarding those methods. + + """ + + def __init__(self, **kwargs): + requires_backends(self, ["bs4"]) + super().__init__(**kwargs) + + def xpath_soup(self, element): + xpath_tags = [] + xpath_subscripts = [] + child = element if element.name else element.parent + for parent in child.parents: # type: bs4.element.Tag + siblings = parent.find_all(child.name, recursive=False) + xpath_tags.append(child.name) + xpath_subscripts.append( + 0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child) + ) + child = parent + xpath_tags.reverse() + xpath_subscripts.reverse() + return xpath_tags, xpath_subscripts + + def get_three_from_single(self, html_string): + html_code = BeautifulSoup(html_string, "html.parser") + + all_doc_strings = [] + string2xtag_seq = [] + string2xsubs_seq = [] + + for element in html_code.descendants: + if type(element) == bs4.element.NavigableString: + if type(element.parent) != bs4.element.Tag: + continue + + text_in_this_tag = html.unescape(element).strip() + if not text_in_this_tag: + continue + + all_doc_strings.append(text_in_this_tag) + + xpath_tags, xpath_subscripts = self.xpath_soup(element) + string2xtag_seq.append(xpath_tags) + string2xsubs_seq.append(xpath_subscripts) + + if len(all_doc_strings) != len(string2xtag_seq): + raise ValueError("Number of doc strings and xtags does not correspond") + if len(all_doc_strings) != len(string2xsubs_seq): + raise ValueError("Number of doc strings and xsubs does not correspond") + + return all_doc_strings, string2xtag_seq, string2xsubs_seq + + def construct_xpath(self, xpath_tags, xpath_subscripts): + xpath = "" + for tagname, subs in zip(xpath_tags, xpath_subscripts): + xpath += f"/{tagname}" + if subs != 0: + xpath += f"[{subs}]" + return xpath + + def __call__(self, html_strings) -> BatchFeature: + """ + Main method to prepare for the model one or several HTML strings. + + Args: + html_strings (`str`, `List[str]`): + The HTML string or batch of HTML strings from which to extract nodes and corresponding xpaths. + + Returns: + [`BatchFeature`]: A [`BatchFeature`] with the following fields: + + - **nodes** -- Nodes. + - **xpaths** -- Corresponding xpaths. + + Examples: + + ```python + >>> from transformers import MarkupLMFeatureExtractor + + >>> page_name_1 = "page1.html" + >>> page_name_2 = "page2.html" + >>> page_name_3 = "page3.html" + + >>> with open(page_name_1) as f: + ... single_html_string = f.read() + + >>> feature_extractor = MarkupLMFeatureExtractor() + + >>> # single example + >>> encoding = feature_extractor(single_html_string) + >>> print(encoding.keys()) + >>> # dict_keys(['nodes', 'xpaths']) + + >>> # batched example + + >>> multi_html_strings = [] + + >>> with open(page_name_2) as f: + ... multi_html_strings.append(f.read()) + >>> with open(page_name_3) as f: + ... multi_html_strings.append(f.read()) + + >>> encoding = feature_extractor(multi_html_strings) + >>> print(encoding.keys()) + >>> # dict_keys(['nodes', 'xpaths']) + ```""" + + # Input type checking for clearer error + valid_strings = False + + # Check that strings has a valid type + if isinstance(html_strings, str): + valid_strings = True + elif isinstance(html_strings, (list, tuple)): + if len(html_strings) == 0 or isinstance(html_strings[0], str): + valid_strings = True + + if not valid_strings: + raise ValueError( + "HTML strings must of type `str`, `List[str]` (batch of examples), " + f"but is of type {type(html_strings)}." + ) + + is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str))) + + if not is_batched: + html_strings = [html_strings] + + # Get nodes + xpaths + nodes = [] + xpaths = [] + for html_string in html_strings: + all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string) + nodes.append(all_doc_strings) + xpath_strings = [] + for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq): + xpath_string = self.construct_xpath(tag_list, sub_list) + xpath_strings.append(xpath_string) + xpaths.append(xpath_strings) + + # return as Dict + data = {"nodes": nodes, "xpaths": xpaths} + encoded_inputs = BatchFeature(data=data, tensor_type=None) + + return encoded_inputs diff --git a/src/transformers/models/markuplm/modeling_markuplm.py b/src/transformers/models/markuplm/modeling_markuplm.py new file mode 100755 index 00000000000000..0a8e9050142ee9 --- /dev/null +++ b/src/transformers/models/markuplm/modeling_markuplm.py @@ -0,0 +1,1300 @@ +# coding=utf-8 +# Copyright 2022 Microsoft Research Asia and the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch MarkupLM model.""" + +import math +import os +from typing import Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from transformers.activations import ACT2FN +from transformers.file_utils import ( + add_start_docstrings, + add_start_docstrings_to_model_forward, + replace_return_docstrings, +) +from transformers.modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + MaskedLMOutput, + QuestionAnsweringModelOutput, + SequenceClassifierOutput, + TokenClassifierOutput, +) +from transformers.modeling_utils import ( + PreTrainedModel, + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + prune_linear_layer, +) +from transformers.utils import logging + +from .configuration_markuplm import MarkupLMConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "microsoft/markuplm-base" +_CONFIG_FOR_DOC = "MarkupLMConfig" +_TOKENIZER_FOR_DOC = "MarkupLMTokenizer" + +MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "microsoft/markuplm-base", + "microsoft/markuplm-large", +] + + +class XPathEmbeddings(nn.Module): + """Construct the embeddings from xpath tags and subscripts. + + We drop tree-id in this version, as its info can be covered by xpath. + """ + + def __init__(self, config): + super(XPathEmbeddings, self).__init__() + self.max_depth = config.max_depth + + self.xpath_unitseq2_embeddings = nn.Linear(config.xpath_unit_hidden_size * self.max_depth, config.hidden_size) + + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + self.activation = nn.ReLU() + self.xpath_unitseq2_inner = nn.Linear(config.xpath_unit_hidden_size * self.max_depth, 4 * config.hidden_size) + self.inner2emb = nn.Linear(4 * config.hidden_size, config.hidden_size) + + self.xpath_tag_sub_embeddings = nn.ModuleList( + [ + nn.Embedding(config.max_xpath_tag_unit_embeddings, config.xpath_unit_hidden_size) + for _ in range(self.max_depth) + ] + ) + + self.xpath_subs_sub_embeddings = nn.ModuleList( + [ + nn.Embedding(config.max_xpath_subs_unit_embeddings, config.xpath_unit_hidden_size) + for _ in range(self.max_depth) + ] + ) + + def forward(self, xpath_tags_seq=None, xpath_subs_seq=None): + xpath_tags_embeddings = [] + xpath_subs_embeddings = [] + + for i in range(self.max_depth): + xpath_tags_embeddings.append(self.xpath_tag_sub_embeddings[i](xpath_tags_seq[:, :, i])) + xpath_subs_embeddings.append(self.xpath_subs_sub_embeddings[i](xpath_subs_seq[:, :, i])) + + xpath_tags_embeddings = torch.cat(xpath_tags_embeddings, dim=-1) + xpath_subs_embeddings = torch.cat(xpath_subs_embeddings, dim=-1) + + xpath_embeddings = xpath_tags_embeddings + xpath_subs_embeddings + + xpath_embeddings = self.inner2emb(self.dropout(self.activation(self.xpath_unitseq2_inner(xpath_embeddings)))) + + return xpath_embeddings + + +# Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids +def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): + """ + Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols + are ignored. This is modified from fairseq's `utils.make_positions`. + + Args: + x: torch.Tensor x: + + Returns: torch.Tensor + """ + # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. + mask = input_ids.ne(padding_idx).int() + incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask + return incremental_indices.long() + padding_idx + + +class MarkupLMEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type embeddings.""" + + def __init__(self, config): + super(MarkupLMEmbeddings, self).__init__() + self.config = config + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + + self.max_depth = config.max_depth + + self.xpath_embeddings = XPathEmbeddings(config) + + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) + + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) + + self.padding_idx = config.pad_token_id + self.position_embeddings = nn.Embedding( + config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx + ) + + # Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings.create_position_ids_from_inputs_embeds + def create_position_ids_from_inputs_embeds(self, inputs_embeds): + """ + We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. + + Args: + inputs_embeds: torch.Tensor + + Returns: torch.Tensor + """ + input_shape = inputs_embeds.size()[:-1] + sequence_length = input_shape[1] + + position_ids = torch.arange( + self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device + ) + return position_ids.unsqueeze(0).expand(input_shape) + + def forward( + self, + input_ids=None, + xpath_tags_seq=None, + xpath_subs_seq=None, + token_type_ids=None, + position_ids=None, + inputs_embeds=None, + past_key_values_length=0, + ): + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + device = input_ids.device if input_ids is not None else inputs_embeds.device + + if position_ids is None: + if input_ids is not None: + # Create the position ids from the input token ids. Any padded tokens remain padded. + position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length) + else: + position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) + + if token_type_ids is None: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + + # prepare xpath seq + if xpath_tags_seq is None: + xpath_tags_seq = self.config.tag_pad_id * torch.ones( + tuple(list(input_shape) + [self.max_depth]), dtype=torch.long, device=device + ) + if xpath_subs_seq is None: + xpath_subs_seq = self.config.subs_pad_id * torch.ones( + tuple(list(input_shape) + [self.max_depth]), dtype=torch.long, device=device + ) + + words_embeddings = inputs_embeds + position_embeddings = self.position_embeddings(position_ids) + + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + xpath_embeddings = self.xpath_embeddings(xpath_tags_seq, xpath_subs_seq) + embeddings = words_embeddings + position_embeddings + token_type_embeddings + xpath_embeddings + + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->MarkupLM +class MarkupLMSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertIntermediate +class MarkupLMIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->MarkupLM +class MarkupLMOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertPooler +class MarkupLMPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->MarkupLM +class MarkupLMPredictionHeadTransform(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if isinstance(config.hidden_act, str): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->MarkupLM +class MarkupLMLMPredictionHead(nn.Module): + def __init__(self, config): + super().__init__() + self.transform = MarkupLMPredictionHeadTransform(config) + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. + self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + + # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` + self.decoder.bias = self.bias + + def forward(self, hidden_states): + hidden_states = self.transform(hidden_states) + hidden_states = self.decoder(hidden_states) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->MarkupLM +class MarkupLMOnlyMLMHead(nn.Module): + def __init__(self, config): + super().__init__() + self.predictions = MarkupLMLMPredictionHead(config) + + def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: + prediction_scores = self.predictions(sequence_output) + return prediction_scores + + +# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->MarkupLM +class MarkupLMSelfAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads})" + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = position_embedding_type or getattr( + config, "position_embedding_type", "absolute" + ) + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) + + self.is_decoder = config.is_decoder + + def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. + is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_layer = past_key_value[0] + value_layer = past_key_value[1] + attention_mask = encoder_attention_mask + elif is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + seq_length = hidden_states.size()[1] + position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in MarkupLMModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + if self.is_decoder: + outputs = outputs + (past_key_value,) + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->MarkupLM +class MarkupLMAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + self.self = MarkupLMSelfAttention(config, position_embedding_type=position_embedding_type) + self.output = MarkupLMSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->MarkupLM +class MarkupLMLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = MarkupLMAttention(config) + self.is_decoder = config.is_decoder + self.add_cross_attention = config.add_cross_attention + if self.add_cross_attention: + if not self.is_decoder: + raise ValueError(f"{self} should be used as a decoder model if cross attention is added") + self.crossattention = MarkupLMAttention(config, position_embedding_type="absolute") + self.intermediate = MarkupLMIntermediate(config) + self.output = MarkupLMOutput(config) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + + # if decoder, the last output is tuple of self-attn cache + if self.is_decoder: + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + else: + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + cross_attn_present_key_value = None + if self.is_decoder and encoder_hidden_states is not None: + if not hasattr(self, "crossattention"): + raise ValueError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" + " by setting `config.add_cross_attention=True`" + ) + + # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + cross_attn_past_key_value, + output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights + + # add cross-attn cache to positions 3,4 of present_key_value tuple + cross_attn_present_key_value = cross_attention_outputs[-1] + present_key_value = present_key_value + cross_attn_present_key_value + + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output + ) + outputs = (layer_output,) + outputs + + # if decoder, return the attn key/values as the last output + if self.is_decoder: + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->MarkupLM +class MarkupLMEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList([MarkupLMLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = False, + output_hidden_states: Optional[bool] = False, + return_dict: Optional[bool] = True, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + + next_decoder_cache = () if use_cache else None + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + if use_cache: + logger.warning( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, past_key_value, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + if self.config.add_cross_attention: + all_cross_attentions = all_cross_attentions + (layer_outputs[2],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +class MarkupLMPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = MarkupLMConfig + pretrained_model_archive_map = MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST + base_model_prefix = "markuplm" + _keys_to_ignore_on_load_missing = [r"position_ids"] + + # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights with Bert->MarkupLM + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, nn.Linear): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs): + return super(MarkupLMPreTrainedModel, cls).from_pretrained( + pretrained_model_name_or_path, *model_args, **kwargs + ) + + +MARKUPLM_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use + it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`MarkupLMConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +MARKUPLM_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`MarkupLMTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + + xpath_tags_seq (`torch.LongTensor` of shape `({0}, config.max_depth)`, *optional*): + Tag IDs for each token in the input sequence, padded up to config.max_depth. + + xpath_subs_seq (`torch.LongTensor` of shape `({0}, config.max_depth)`, *optional*): + Subscript IDs for each token in the input sequence, padded up to config.max_depth. + + attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: `1` for + tokens that are NOT MASKED, `0` for MASKED tokens. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: `0` corresponds to a *sentence A* token, `1` corresponds to a *sentence B* token + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: `1` + indicates the head is **not masked**, `0` indicates the head is **masked**. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert *input_ids* indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + If set to `True`, the attentions tensors of all attention layers are returned. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + If set to `True`, the hidden states of all layers are returned. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + If set to `True`, the model will return a [`~file_utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare MarkupLM Model transformer outputting raw hidden-states without any specific head on top.", + MARKUPLM_START_DOCSTRING, +) +class MarkupLMModel(MarkupLMPreTrainedModel): + # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->MarkupLM + def __init__(self, config, add_pooling_layer=True): + super().__init__(config) + self.config = config + + self.embeddings = MarkupLMEmbeddings(config) + self.encoder = MarkupLMEncoder(config) + + self.pooler = MarkupLMPooler(config) if add_pooling_layer else None + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(MARKUPLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids=None, + xpath_tags_seq=None, + xpath_subs_seq=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Returns: + + Examples: + + ```python + >>> from transformers import MarkupLMProcessor, MarkupLMModel + + >>> processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base") + >>> model = MarkupLMModel.from_pretrained("microsoft/markuplm-base") + + >>> html_string = " Page Title " + + >>> encoding = processor(html_string, return_tensors="pt") + + >>> outputs = model(**encoding) + >>> last_hidden_states = outputs.last_hidden_state + >>> list(last_hidden_states.shape) + [1, 4, 768] + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + device = input_ids.device if input_ids is not None else inputs_embeds.device + + if attention_mask is None: + attention_mask = torch.ones(input_shape, device=device) + + if token_type_ids is None: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) + extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + + if head_mask is not None: + if head_mask.dim() == 1: + head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1) + head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1) + elif head_mask.dim() == 2: + head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) + head_mask = head_mask.to(dtype=next(self.parameters()).dtype) + else: + head_mask = [None] * self.config.num_hidden_layers + + embedding_output = self.embeddings( + input_ids=input_ids, + xpath_tags_seq=xpath_tags_seq, + xpath_subs_seq=xpath_subs_seq, + position_ids=position_ids, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + ) + encoder_outputs = self.encoder( + embedding_output, + extended_attention_mask, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = encoder_outputs[0] + + pooled_output = self.pooler(sequence_output) if self.pooler is not None else None + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + # Copied from transformers.models.bert.modeling_bert.BertModel.prepare_inputs_for_generation + def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs): + input_shape = input_ids.shape + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_shape) + + # cut decoder_input_ids if past is used + if past is not None: + input_ids = input_ids[:, -1:] + + return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past} + + # Copied from transformers.models.bert.modeling_bert.BertModel._reorder_cache + def _reorder_cache(self, past, beam_idx): + reordered_past = () + for layer_past in past: + reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) + return reordered_past + + +@add_start_docstrings( + """ + MarkupLM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear + layers on top of the hidden-states output to compute `span start logits` and `span end logits`). + """, + MARKUPLM_START_DOCSTRING, +) +class MarkupLMForQuestionAnswering(MarkupLMPreTrainedModel): + _keys_to_ignore_on_load_unexpected = [r"pooler"] + + # Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering.__init__ with bert->markuplm, Bert->MarkupLM + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.markuplm = MarkupLMModel(config, add_pooling_layer=False) + self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(MARKUPLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @replace_return_docstrings(output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids=None, + xpath_tags_seq=None, + xpath_subs_seq=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + start_positions=None, + end_positions=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + + Returns: + + Examples: + + ```python + >>> from transformers import MarkupLMProcessor, MarkupLMForQuestionAnswering + >>> import torch + + >>> processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base-finetuned-websrc") + >>> model = MarkupLMForQuestionAnswering.from_pretrained("microsoft/markuplm-base-finetuned-websrc") + + >>> html_string = " My name is Niels " + >>> question = "What's his name?" + + >>> encoding = processor(html_string, questions=question, return_tensors="pt") + + >>> with torch.no_grad(): + ... outputs = model(**encoding) + + >>> answer_start_index = outputs.start_logits.argmax() + >>> answer_end_index = outputs.end_logits.argmax() + + >>> predict_answer_tokens = encoding.input_ids[0, answer_start_index : answer_end_index + 1] + >>> processor.decode(predict_answer_tokens).strip() + 'Niels' + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.markuplm( + input_ids, + xpath_tags_seq=xpath_tags_seq, + xpath_subs_seq=xpath_subs_seq, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1).contiguous() + end_logits = end_logits.squeeze(-1).contiguous() + + total_loss = None + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions.clamp_(0, ignored_index) + end_positions.clamp_(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if not return_dict: + output = (start_logits, end_logits) + outputs[2:] + return ((total_loss,) + output) if total_loss is not None else output + + return QuestionAnsweringModelOutput( + loss=total_loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings("""MarkupLM Model with a `token_classification` head on top.""", MARKUPLM_START_DOCSTRING) +class MarkupLMForTokenClassification(MarkupLMPreTrainedModel): + # Copied from transformers.models.bert.modeling_bert.BertForTokenClassification.__init__ with bert->markuplm, Bert->MarkupLM + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.markuplm = MarkupLMModel(config, add_pooling_layer=False) + classifier_dropout = ( + config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob + ) + self.dropout = nn.Dropout(classifier_dropout) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(MARKUPLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids=None, + xpath_tags_seq=None, + xpath_subs_seq=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. + + Returns: + + Examples: + + ```python + >>> from transformers import AutoProcessor, AutoModelForTokenClassification + >>> import torch + + >>> processor = AutoProcessor.from_pretrained("microsoft/markuplm-base") + >>> processor.parse_html = False + >>> model = AutoModelForTokenClassification.from_pretrained("microsoft/markuplm-base", num_labels=7) + + >>> nodes = ["hello", "world"] + >>> xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span"] + >>> node_labels = [1, 2] + >>> encoding = processor(nodes=nodes, xpaths=xpaths, node_labels=node_labels, return_tensors="pt") + + >>> with torch.no_grad(): + ... outputs = model(**encoding) + + >>> loss = outputs.loss + >>> logits = outputs.logits + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.markuplm( + input_ids, + xpath_tags_seq=xpath_tags_seq, + xpath_subs_seq=xpath_subs_seq, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + prediction_scores = self.classifier(sequence_output) # (batch_size, seq_length, node_type_size) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct( + prediction_scores.view(-1, self.config.num_labels), + labels.view(-1), + ) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=prediction_scores, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + MarkupLM Model transformer with a sequence classification/regression head on top (a linear layer on top of the + pooled output) e.g. for GLUE tasks. + """, + MARKUPLM_START_DOCSTRING, +) +class MarkupLMForSequenceClassification(MarkupLMPreTrainedModel): + # Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification.__init__ with bert->markuplm, Bert->MarkupLM + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.config = config + + self.markuplm = MarkupLMModel(config) + classifier_dropout = ( + config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob + ) + self.dropout = nn.Dropout(classifier_dropout) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(MARKUPLM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids=None, + xpath_tags_seq=None, + xpath_subs_seq=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + + Returns: + + Examples: + + ```python + >>> from transformers import AutoProcessor, AutoModelForSequenceClassification + >>> import torch + + >>> processor = AutoProcessor.from_pretrained("microsoft/markuplm-base") + >>> model = AutoModelForSequenceClassification.from_pretrained("microsoft/markuplm-base", num_labels=7) + + >>> html_string = " Page Title " + >>> encoding = processor(html_string, return_tensors="pt") + + >>> with torch.no_grad(): + ... outputs = model(**encoding) + + >>> loss = outputs.loss + >>> logits = outputs.logits + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.markuplm( + input_ids, + xpath_tags_seq=xpath_tags_seq, + xpath_subs_seq=xpath_subs_seq, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/src/transformers/models/markuplm/processing_markuplm.py b/src/transformers/models/markuplm/processing_markuplm.py new file mode 100644 index 00000000000000..5740fe43abc75a --- /dev/null +++ b/src/transformers/models/markuplm/processing_markuplm.py @@ -0,0 +1,140 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Processor class for MarkupLM. +""" +from typing import Optional, Union + +from ...file_utils import TensorType +from ...processing_utils import ProcessorMixin +from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, TruncationStrategy + + +class MarkupLMProcessor(ProcessorMixin): + r""" + Constructs a MarkupLM processor which combines a MarkupLM feature extractor and a MarkupLM tokenizer into a single + processor. + + [`MarkupLMProcessor`] offers all the functionalities you need to prepare data for the model. + + It first uses [`MarkupLMFeatureExtractor`] to extract nodes and corresponding xpaths from one or more HTML strings. + Next, these are provided to [`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`], which turns them into token-level + `input_ids`, `attention_mask`, `token_type_ids`, `xpath_tags_seq` and `xpath_subs_seq`. + + Args: + feature_extractor (`MarkupLMFeatureExtractor`): + An instance of [`MarkupLMFeatureExtractor`]. The feature extractor is a required input. + tokenizer (`MarkupLMTokenizer` or `MarkupLMTokenizerFast`): + An instance of [`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`]. The tokenizer is a required input. + parse_html (`bool`, *optional*, defaults to `True`): + Whether or not to use `MarkupLMFeatureExtractor` to parse HTML strings into nodes and corresponding xpaths. + """ + feature_extractor_class = "MarkupLMFeatureExtractor" + tokenizer_class = ("MarkupLMTokenizer", "MarkupLMTokenizerFast") + parse_html = True + + def __call__( + self, + html_strings=None, + nodes=None, + xpaths=None, + node_labels=None, + questions=None, + add_special_tokens: bool = True, + padding: Union[bool, str, PaddingStrategy] = False, + truncation: Union[bool, str, TruncationStrategy] = False, + max_length: Optional[int] = None, + stride: int = 0, + pad_to_multiple_of: Optional[int] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + return_tensors: Optional[Union[str, TensorType]] = None, + **kwargs + ) -> BatchEncoding: + """ + This method first forwards the `html_strings` argument to [`~MarkupLMFeatureExtractor.__call__`]. Next, it + passes the `nodes` and `xpaths` along with the additional arguments to [`~MarkupLMTokenizer.__call__`] and + returns the output. + + Optionally, one can also provide a `text` argument which is passed along as first sequence. + + Please refer to the docstring of the above two methods for more information. + """ + # first, create nodes and xpaths + if self.parse_html: + if html_strings is None: + raise ValueError("Make sure to pass HTML strings in case `parse_html` is set to `True`") + + if nodes is not None or xpaths is not None or node_labels is not None: + raise ValueError( + "Please don't pass nodes, xpaths nor node labels in case `parse_html` is set to `True`" + ) + + features = self.feature_extractor(html_strings) + nodes = features["nodes"] + xpaths = features["xpaths"] + else: + if html_strings is not None: + raise ValueError("You have passed HTML strings but `parse_html` is set to `False`.") + if nodes is None or xpaths is None: + raise ValueError("Make sure to pass nodes and xpaths in case `parse_html` is set to `False`") + + # # second, apply the tokenizer + if questions is not None and self.parse_html: + if isinstance(questions, str): + questions = [questions] # add batch dimension (as the feature extractor always adds a batch dimension) + + encoded_inputs = self.tokenizer( + text=questions if questions is not None else nodes, + text_pair=nodes if questions is not None else None, + xpaths=xpaths, + node_labels=node_labels, + add_special_tokens=add_special_tokens, + padding=padding, + truncation=truncation, + max_length=max_length, + stride=stride, + pad_to_multiple_of=pad_to_multiple_of, + return_token_type_ids=return_token_type_ids, + return_attention_mask=return_attention_mask, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_offsets_mapping=return_offsets_mapping, + return_length=return_length, + verbose=verbose, + return_tensors=return_tensors, + **kwargs, + ) + + return encoded_inputs + + def batch_decode(self, *args, **kwargs): + """ + This method forwards all its arguments to TrOCRTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer + to the docstring of this method for more information. + """ + return self.tokenizer.batch_decode(*args, **kwargs) + + def decode(self, *args, **kwargs): + """ + This method forwards all its arguments to TrOCRTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the + docstring of this method for more information. + """ + return self.tokenizer.decode(*args, **kwargs) diff --git a/src/transformers/models/markuplm/tokenization_markuplm.py b/src/transformers/models/markuplm/tokenization_markuplm.py new file mode 100644 index 00000000000000..bf8d4e6dd90de4 --- /dev/null +++ b/src/transformers/models/markuplm/tokenization_markuplm.py @@ -0,0 +1,1461 @@ +# coding=utf-8 +# Copyright Microsoft Research and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tokenization class for MarkupLM.""" + +import json +import os +from functools import lru_cache +from typing import Dict, List, Optional, Tuple, Union + +import regex as re + +from ...file_utils import PaddingStrategy, TensorType, add_end_docstrings +from ...tokenization_utils import AddedToken, PreTrainedTokenizer +from ...tokenization_utils_base import ( + ENCODE_KWARGS_DOCSTRING, + BatchEncoding, + EncodedInput, + PreTokenizedInput, + TextInput, + TextInputPair, + TruncationStrategy, +) +from ...utils import logging + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} + +PRETRAINED_VOCAB_FILES_MAP = { + "vocab_file": { + "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/vocab.json", + "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/vocab.json", + }, + "merges_file": { + "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/merges.txt", + "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/merges.txt", + }, +} + + +PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { + "microsoft/markuplm-base": 512, + "microsoft/markuplm-large": 512, +} + + +MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r""" + add_special_tokens (`bool`, *optional*, defaults to `True`): + Whether or not to encode the sequences with the special tokens relative to their model. + padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`): + Activates and controls padding. Accepts the following values: + + - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single + sequence if provided). + - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum + acceptable input length for the model if that argument is not provided. + - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different + lengths). + truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): + Activates and controls truncation. Accepts the following values: + + - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or + to the maximum acceptable input length for the model if that argument is not provided. This will + truncate token by token, removing a token from the longest sequence in the pair if a pair of + sequences (or a batch of pairs) is provided. + - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the + maximum acceptable input length for the model if that argument is not provided. This will only + truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. + - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the + maximum acceptable input length for the model if that argument is not provided. This will only + truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. + - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths + greater than the model maximum admissible input size). + max_length (`int`, *optional*): + Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to + `None`, this will use the predefined model maximum length if a maximum length is required by one of the + truncation/padding parameters. If the model has no specific maximum input length (like XLNet) + truncation/padding to a maximum length will be deactivated. + stride (`int`, *optional*, defaults to 0): + If set to a number along with `max_length`, the overflowing tokens returned when + `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence + returned to provide some overlap between truncated and overflowing sequences. The value of this + argument defines the number of overlapping tokens. + pad_to_multiple_of (`int`, *optional*): + If set will pad the sequence to a multiple of the provided value. This is especially useful to enable + the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). + return_tensors (`str` or [`~file_utils.TensorType`], *optional*): + If set, will return tensors instead of list of python integers. Acceptable values are: + + - `'tf'`: Return TensorFlow `tf.constant` objects. + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return Numpy `np.ndarray` objects. +""" + + +@lru_cache() +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control + characters the bpe code barfs on. The reversible bpe codes work on unicode strings. This means you need a large # + of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset + you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe + vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. + """ + bs = ( + list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) + ) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8 + n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def get_pairs(word): + """ + Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length + strings). + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +class MarkupLMTokenizer(PreTrainedTokenizer): + r""" + Construct a MarkupLM tokenizer. Based on byte-level Byte-Pair-Encoding (BPE). [`MarkupLMTokenizer`] can be used to + turn HTML strings into to token-level `input_ids`, `attention_mask`, `token_type_ids`, `xpath_tags_seq` and + `xpath_tags_seq`. This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. + Users should refer to this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + Path to the vocabulary file. + merges_file (`str`): + Path to the merges file. + errors (`str`, *optional*, defaults to `"replace"`): + Paradigm to follow when decoding bytes to UTF-8. See + [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. + bos_token (`str`, *optional*, defaults to `""`): + The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. + + + + When building a sequence using special tokens, this is not the token that is used for the beginning of + sequence. The token used is the `cls_token`. + + + + eos_token (`str`, *optional*, defaults to `""`): + The end of sequence token. + + + + When building a sequence using special tokens, this is not the token that is used for the end of sequence. + The token used is the `sep_token`. + + + + sep_token (`str`, *optional*, defaults to `""`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + cls_token (`str`, *optional*, defaults to `""`): + The classifier token which is used when doing sequence classification (classification of the whole sequence + instead of per-token classification). It is the first token of the sequence when built with special tokens. + unk_token (`str`, *optional*, defaults to `""`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + pad_token (`str`, *optional*, defaults to `""`): + The token used for padding, for example when batching sequences of different lengths. + mask_token (`str`, *optional*, defaults to `""`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. + add_prefix_space (`bool`, *optional*, defaults to `False`): + Whether or not to add an initial space to the input. This allows to treat the leading word just as any + other word. (RoBERTa tokenizer detect beginning of words by the preceding space). + """ + + vocab_files_names = VOCAB_FILES_NAMES + pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + + def __init__( + self, + vocab_file, + merges_file, + tags_dict, + errors="replace", + bos_token="", + eos_token="", + sep_token="", + cls_token="", + unk_token="", + pad_token="", + mask_token="", + add_prefix_space=False, + max_depth=50, + max_width=1000, + pad_width=1001, + pad_token_label=-100, + only_label_first_subword=True, + **kwargs + ): + bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token + eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token + sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token + cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token + unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token + pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token + + # Mask token behave like a normal word, i.e. include the space before it + mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token + + super().__init__( + vocab_file=vocab_file, + merges_file=merges_file, + tags_dict=tags_dict, + errors=errors, + bos_token=bos_token, + eos_token=eos_token, + unk_token=unk_token, + sep_token=sep_token, + cls_token=cls_token, + pad_token=pad_token, + mask_token=mask_token, + add_prefix_space=add_prefix_space, + max_depth=max_depth, + max_width=max_width, + pad_width=pad_width, + pad_token_label=pad_token_label, + only_label_first_subword=only_label_first_subword, + **kwargs, + ) + + with open(vocab_file, encoding="utf-8") as vocab_handle: + self.encoder = json.load(vocab_handle) + + self.tags_dict = tags_dict + self.decoder = {v: k for k, v in self.encoder.items()} + self.errors = errors # how to handle errors in decoding + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + with open(merges_file, encoding="utf-8") as merges_handle: + bpe_merges = merges_handle.read().split("\n")[1:-1] + bpe_merges = [tuple(merge.split()) for merge in bpe_merges] + self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) + self.cache = {} + self.add_prefix_space = add_prefix_space + + # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions + self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") + + # additional properties + self.max_depth = max_depth + self.max_width = max_width + self.pad_width = pad_width + self.unk_tag_id = len(self.tags_dict) + self.pad_tag_id = self.unk_tag_id + 1 + self.pad_xpath_tags_seq = [self.pad_tag_id] * self.max_depth + self.pad_xpath_subs_seq = [self.pad_width] * self.max_depth + self.pad_token_label = pad_token_label + self.only_label_first_subword = only_label_first_subword + + def get_xpath_seq(self, xpath): + """ + Given the xpath expression of one particular node (like "/html/body/div/li[1]/div/span[2]"), return a list of + tag IDs and corresponding subscripts, taking into account max depth. + """ + xpath_tags_list = [] + xpath_subs_list = [] + + xpath_units = xpath.split("/") + for unit in xpath_units: + if not unit.strip(): + continue + name_subs = unit.strip().split("[") + tag_name = name_subs[0] + sub = 0 if len(name_subs) == 1 else int(name_subs[1][:-1]) + xpath_tags_list.append(self.tags_dict.get(tag_name, self.unk_tag_id)) + xpath_subs_list.append(min(self.max_width, sub)) + + xpath_tags_list = xpath_tags_list[: self.max_depth] + xpath_subs_list = xpath_tags_list[: self.max_depth] + xpath_tags_list += [self.pad_tag_id] * (self.max_depth - len(xpath_tags_list)) + xpath_subs_list += [self.pad_width] * (self.max_depth - len(xpath_subs_list)) + + return xpath_tags_list, xpath_subs_list + + @property + def vocab_size(self): + return len(self.encoder) + + def get_vocab(self): + return dict(self.encoder, **self.added_tokens_encoder) + + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token) + pairs = get_pairs(word) + + if not pairs: + return token + + while True: + bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + except ValueError: + new_word.extend(word[i:]) + break + else: + new_word.extend(word[i:j]) + i = j + + if word[i] == first and i < len(word) - 1 and word[i + 1] == second: + new_word.append(first + second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = " ".join(word) + self.cache[token] = word + return word + + def _tokenize(self, text): + """Tokenize a string.""" + bpe_tokens = [] + for token in re.findall(self.pat, text): + token = "".join( + self.byte_encoder[b] for b in token.encode("utf-8") + ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) + bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" ")) + return bpe_tokens + + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.encoder.get(token, self.encoder.get(self.unk_token)) + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.decoder.get(index) + + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + logger.warning( + "MarkupLM now does not support generative tasks, decoding is experimental and subject to change." + ) + text = "".join(tokens) + text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors) + return text + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + if not os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + merge_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] + ) + + # save vocab_file + with open(vocab_file, "w", encoding="utf-8") as f: + f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") + + # save merge_file + index = 0 + with open(merge_file, "w", encoding="utf-8") as writer: + writer.write("#version: 0.2\n") + for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): + if index != token_index: + logger.warning( + f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." + " Please check that the tokenizer is not corrupted!" + ) + index = token_index + writer.write(" ".join(bpe_tokens) + "\n") + index += 1 + + return vocab_file, merge_file + + def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): + add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space) + if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()): + text = " " + text + return (text, kwargs) + + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A RoBERTa sequence has the following format: + - single sequence: ` X ` + - pair of sequences: ` A B ` + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. + """ + if token_ids_1 is None: + return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + cls = [self.cls_token_id] + sep = [self.sep_token_id] + return cls + token_ids_0 + sep + token_ids_1 + sep + + def build_xpath_tags_with_special_tokens( + self, xpath_tags_0: List[int], xpath_tags_1: Optional[List[int]] = None + ) -> List[int]: + pad = [self.pad_xpath_tags_seq] + if len(xpath_tags_1) == 0: + return pad + xpath_tags_0 + pad + return pad + xpath_tags_0 + pad + xpath_tags_1 + pad + + def build_xpath_subs_with_special_tokens( + self, xpath_subs_0: List[int], xpath_subs_1: Optional[List[int]] = None + ) -> List[int]: + pad = [self.pad_xpath_subs_seq] + if len(xpath_subs_1) == 0: + return pad + xpath_subs_0 + pad + return pad + xpath_subs_0 + pad + xpath_subs_1 + pad + + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Args: + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + if token_ids_1 is None: + return [1] + ([0] * len(token_ids_0)) + [1] + return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not + make use of token type ids, therefore a list of zeros is returned. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + Returns: + `List[int]`: List of zeros. + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep + token_ids_1 + sep) * [0] + + @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) + def __call__( + self, + text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], + text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, + xpaths: Union[List[List[int]], List[List[List[int]]]] = None, + node_labels: Optional[Union[List[int], List[List[int]]]] = None, + add_special_tokens: bool = True, + padding: Union[bool, str, PaddingStrategy] = False, + truncation: Union[bool, str, TruncationStrategy] = False, + max_length: Optional[int] = None, + stride: int = 0, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + **kwargs + ) -> BatchEncoding: + """ + Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of + sequences with node-level xpaths and optional labels. + + Args: + text (`str`, `List[str]`, `List[List[str]]`): + The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings + (nodes of a single example or questions of a batch of examples) or a list of list of strings (batch of + nodes). + text_pair (`List[str]`, `List[List[str]]`): + The sequence or batch of sequences to be encoded. Each sequence should be a list of strings + (pretokenized string). + xpaths (`List[List[int]]`, `List[List[List[int]]]`): + Node-level xpaths. + node_labels (`List[int]`, `List[List[int]]`, *optional*): + Node-level integer labels (for token classification tasks). + """ + + # Input type checking for clearer error + def _is_valid_text_input(t): + if isinstance(t, str): + # Strings are fine + return True + elif isinstance(t, (list, tuple)): + # List are fine as long as they are... + if len(t) == 0: + # ... empty + return True + elif isinstance(t[0], str): + # ... list of strings + return True + elif isinstance(t[0], (list, tuple)): + # ... list with an empty list or with a list of strings + return len(t[0]) == 0 or isinstance(t[0][0], str) + else: + return False + else: + return False + + if text_pair is not None: + # in case text + text_pair are provided, text = questions, text_pair = nodes + if not _is_valid_text_input(text): + raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ") + if not isinstance(text_pair, (list, tuple)): + raise ValueError( + "Nodes must be of type `List[str]` (single pretokenized example), " + "or `List[List[str]]` (batch of pretokenized examples)." + ) + else: + # in case only text is provided => must be nodes + if not isinstance(text, (list, tuple)): + raise ValueError( + "Nodes must be of type `List[str]` (single pretokenized example), " + "or `List[List[str]]` (batch of pretokenized examples)." + ) + + if text_pair is not None: + is_batched = isinstance(text, (list, tuple)) + else: + is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)) + + nodes = text if text_pair is None else text_pair + assert xpaths is not None, "You must provide corresponding xpaths" + if is_batched: + assert len(nodes) == len(xpaths), "You must provide nodes and xpaths for an equal amount of examples" + for nodes_example, xpaths_example in zip(nodes, xpaths): + assert len(nodes_example) == len(xpaths_example), "You must provide as many nodes as there are xpaths" + else: + assert len(nodes) == len(xpaths), "You must provide as many nodes as there are xpaths" + + if is_batched: + if text_pair is not None and len(text) != len(text_pair): + raise ValueError( + f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:" + f" {len(text_pair)}." + ) + batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text + is_pair = bool(text_pair is not None) + return self.batch_encode_plus( + batch_text_or_text_pairs=batch_text_or_text_pairs, + is_pair=is_pair, + xpaths=xpaths, + node_labels=node_labels, + add_special_tokens=add_special_tokens, + padding=padding, + truncation=truncation, + max_length=max_length, + stride=stride, + pad_to_multiple_of=pad_to_multiple_of, + return_tensors=return_tensors, + return_token_type_ids=return_token_type_ids, + return_attention_mask=return_attention_mask, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_offsets_mapping=return_offsets_mapping, + return_length=return_length, + verbose=verbose, + **kwargs, + ) + else: + return self.encode_plus( + text=text, + text_pair=text_pair, + xpaths=xpaths, + node_labels=node_labels, + add_special_tokens=add_special_tokens, + padding=padding, + truncation=truncation, + max_length=max_length, + stride=stride, + pad_to_multiple_of=pad_to_multiple_of, + return_tensors=return_tensors, + return_token_type_ids=return_token_type_ids, + return_attention_mask=return_attention_mask, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_offsets_mapping=return_offsets_mapping, + return_length=return_length, + verbose=verbose, + **kwargs, + ) + + @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) + def batch_encode_plus( + self, + batch_text_or_text_pairs: Union[ + List[TextInput], + List[TextInputPair], + List[PreTokenizedInput], + ], + is_pair: bool = None, + xpaths: Optional[List[List[List[int]]]] = None, + node_labels: Optional[Union[List[int], List[List[int]]]] = None, + add_special_tokens: bool = True, + padding: Union[bool, str, PaddingStrategy] = False, + truncation: Union[bool, str, TruncationStrategy] = False, + max_length: Optional[int] = None, + stride: int = 0, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + **kwargs + ) -> BatchEncoding: + # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' + padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( + padding=padding, + truncation=truncation, + max_length=max_length, + pad_to_multiple_of=pad_to_multiple_of, + verbose=verbose, + **kwargs, + ) + + return self._batch_encode_plus( + batch_text_or_text_pairs=batch_text_or_text_pairs, + is_pair=is_pair, + xpaths=xpaths, + node_labels=node_labels, + add_special_tokens=add_special_tokens, + padding_strategy=padding_strategy, + truncation_strategy=truncation_strategy, + max_length=max_length, + stride=stride, + pad_to_multiple_of=pad_to_multiple_of, + return_tensors=return_tensors, + return_token_type_ids=return_token_type_ids, + return_attention_mask=return_attention_mask, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_offsets_mapping=return_offsets_mapping, + return_length=return_length, + verbose=verbose, + **kwargs, + ) + + def _batch_encode_plus( + self, + batch_text_or_text_pairs: Union[ + List[TextInput], + List[TextInputPair], + List[PreTokenizedInput], + ], + is_pair: bool = None, + xpaths: Optional[List[List[List[int]]]] = None, + node_labels: Optional[List[List[int]]] = None, + add_special_tokens: bool = True, + padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, + truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, + max_length: Optional[int] = None, + stride: int = 0, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + **kwargs + ) -> BatchEncoding: + if return_offsets_mapping: + raise NotImplementedError( + "return_offset_mapping is not available when using Python tokenizers. " + "To use this feature, change your tokenizer to one deriving from " + "transformers.PreTrainedTokenizerFast." + ) + + batch_outputs = self._batch_prepare_for_model( + batch_text_or_text_pairs=batch_text_or_text_pairs, + is_pair=is_pair, + xpaths=xpaths, + node_labels=node_labels, + add_special_tokens=add_special_tokens, + padding_strategy=padding_strategy, + truncation_strategy=truncation_strategy, + max_length=max_length, + stride=stride, + pad_to_multiple_of=pad_to_multiple_of, + return_attention_mask=return_attention_mask, + return_token_type_ids=return_token_type_ids, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_length=return_length, + return_tensors=return_tensors, + verbose=verbose, + ) + + return BatchEncoding(batch_outputs) + + @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) + def _batch_prepare_for_model( + self, + batch_text_or_text_pairs, + is_pair: bool = None, + xpaths: Optional[List[List[int]]] = None, + node_labels: Optional[List[List[int]]] = None, + add_special_tokens: bool = True, + padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, + truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, + max_length: Optional[int] = None, + stride: int = 0, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[str] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_length: bool = False, + verbose: bool = True, + ) -> BatchEncoding: + """ + Prepares a sequence of input id, or a pair of sequences of inputs ids so that it can be used by the model. It + adds special tokens, truncates sequences if overflowing while taking into account the special tokens and + manages a moving window (with user defined stride) for overflowing tokens. + + Args: + batch_ids_pairs: list of tokenized input ids or input ids pairs + """ + + batch_outputs = {} + for idx, example in enumerate(zip(batch_text_or_text_pairs, xpaths)): + batch_text_or_text_pair, xpaths_example = example + outputs = self.prepare_for_model( + batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair, + batch_text_or_text_pair[1] if is_pair else None, + xpaths_example, + node_labels=node_labels[idx] if node_labels is not None else None, + add_special_tokens=add_special_tokens, + padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward + truncation=truncation_strategy.value, + max_length=max_length, + stride=stride, + pad_to_multiple_of=None, # we pad in batch afterward + return_attention_mask=False, # we pad in batch afterward + return_token_type_ids=return_token_type_ids, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_length=return_length, + return_tensors=None, # We convert the whole batch to tensors at the end + prepend_batch_axis=False, + verbose=verbose, + ) + + for key, value in outputs.items(): + if key not in batch_outputs: + batch_outputs[key] = [] + batch_outputs[key].append(value) + + batch_outputs = self.pad( + batch_outputs, + padding=padding_strategy.value, + max_length=max_length, + pad_to_multiple_of=pad_to_multiple_of, + return_attention_mask=return_attention_mask, + ) + + batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors) + + return batch_outputs + + @add_end_docstrings(ENCODE_KWARGS_DOCSTRING) + def encode( + self, + text: Union[TextInput, PreTokenizedInput], + text_pair: Optional[PreTokenizedInput] = None, + xpaths: Optional[List[List[int]]] = None, + node_labels: Optional[List[int]] = None, + add_special_tokens: bool = True, + padding: Union[bool, str, PaddingStrategy] = False, + truncation: Union[bool, str, TruncationStrategy] = False, + max_length: Optional[int] = None, + stride: int = 0, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + **kwargs + ) -> List[int]: + encoded_inputs = self.encode_plus( + text=text, + text_pair=text_pair, + xpaths=xpaths, + node_labels=node_labels, + add_special_tokens=add_special_tokens, + padding=padding, + truncation=truncation, + max_length=max_length, + stride=stride, + pad_to_multiple_of=pad_to_multiple_of, + return_tensors=return_tensors, + return_token_type_ids=return_token_type_ids, + return_attention_mask=return_attention_mask, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_offsets_mapping=return_offsets_mapping, + return_length=return_length, + verbose=verbose, + **kwargs, + ) + + return encoded_inputs["input_ids"] + + @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) + def encode_plus( + self, + text: Union[TextInput, PreTokenizedInput], + text_pair: Optional[PreTokenizedInput] = None, + xpaths: Optional[List[List[int]]] = None, + node_labels: Optional[List[int]] = None, + add_special_tokens: bool = True, + padding: Union[bool, str, PaddingStrategy] = False, + truncation: Union[bool, str, TruncationStrategy] = False, + max_length: Optional[int] = None, + stride: int = 0, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + **kwargs + ) -> BatchEncoding: + """ + Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated, + `__call__` should be used instead. + + Args: + text (`str`, `List[str]`, `List[List[str]]`): + The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. + text_pair (`List[str]` or `List[int]`, *optional*): + Optional second sequence to be encoded. This can be a list of strings (nodes of a single example) or a + list of list of strings (nodes of a batch of examples). + """ + + # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' + padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( + padding=padding, + truncation=truncation, + max_length=max_length, + pad_to_multiple_of=pad_to_multiple_of, + verbose=verbose, + **kwargs, + ) + + return self._encode_plus( + text=text, + xpaths=xpaths, + text_pair=text_pair, + node_labels=node_labels, + add_special_tokens=add_special_tokens, + padding_strategy=padding_strategy, + truncation_strategy=truncation_strategy, + max_length=max_length, + stride=stride, + pad_to_multiple_of=pad_to_multiple_of, + return_tensors=return_tensors, + return_token_type_ids=return_token_type_ids, + return_attention_mask=return_attention_mask, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_offsets_mapping=return_offsets_mapping, + return_length=return_length, + verbose=verbose, + **kwargs, + ) + + def _encode_plus( + self, + text: Union[TextInput, PreTokenizedInput], + text_pair: Optional[PreTokenizedInput] = None, + xpaths: Optional[List[List[int]]] = None, + node_labels: Optional[List[int]] = None, + add_special_tokens: bool = True, + padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, + truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, + max_length: Optional[int] = None, + stride: int = 0, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + **kwargs + ) -> BatchEncoding: + if return_offsets_mapping: + raise NotImplementedError( + "return_offset_mapping is not available when using Python tokenizers. " + "To use this feature, change your tokenizer to one deriving from " + "transformers.PreTrainedTokenizerFast. " + "More information on available tokenizers at " + "https://github.com/huggingface/transformers/pull/2674" + ) + + return self.prepare_for_model( + text=text, + text_pair=text_pair, + xpaths=xpaths, + node_labels=node_labels, + add_special_tokens=add_special_tokens, + padding=padding_strategy.value, + truncation=truncation_strategy.value, + max_length=max_length, + stride=stride, + pad_to_multiple_of=pad_to_multiple_of, + return_tensors=return_tensors, + prepend_batch_axis=True, + return_attention_mask=return_attention_mask, + return_token_type_ids=return_token_type_ids, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_length=return_length, + verbose=verbose, + ) + + @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) + def prepare_for_model( + self, + text: Union[TextInput, PreTokenizedInput], + text_pair: Optional[PreTokenizedInput] = None, + xpaths: Optional[List[List[int]]] = None, + node_labels: Optional[List[int]] = None, + add_special_tokens: bool = True, + padding: Union[bool, str, PaddingStrategy] = False, + truncation: Union[bool, str, TruncationStrategy] = False, + max_length: Optional[int] = None, + stride: int = 0, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + prepend_batch_axis: bool = False, + **kwargs + ) -> BatchEncoding: + """ + Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens, + truncates sequences if overflowing while taking into account the special tokens and manages a moving window + (with user defined stride) for overflowing tokens. Please Note, for *text_pair* different than `None` and + *truncation_strategy = longest_first* or `True`, it is not possible to return overflowing tokens. Such a + combination of arguments will raise an error. + + Node-level `xpaths` are turned into token-level `xpath_tags_seq` and `xpath_subs_seq`. If provided, node-level + `node_labels` are turned into token-level `labels`. The node label is used for the first token of the node, + while remaining tokens are labeled with -100, such that they will be ignored by the loss function. + + Args: + text (`str`, `List[str]`, `List[List[str]]`): + The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. + text_pair (`List[str]` or `List[int]`, *optional*): + Optional second sequence to be encoded. This can be a list of strings (nodes of a single example) or a + list of list of strings (nodes of a batch of examples). + """ + + # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' + padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( + padding=padding, + truncation=truncation, + max_length=max_length, + pad_to_multiple_of=pad_to_multiple_of, + verbose=verbose, + **kwargs, + ) + + tokens = [] + pair_tokens = [] + xpath_tags_seq = [] + xpath_subs_seq = [] + pair_xpath_tags_seq = [] + pair_xpath_subs_seq = [] + labels = [] + + if text_pair is None: + if node_labels is None: + # CASE 1: web page classification (training + inference) + CASE 2: token classification (inference) + for word, xpath in zip(text, xpaths): + if len(word) < 1: # skip empty nodes + continue + word_tokens = self.tokenize(word) + tokens.extend(word_tokens) + xpath_tags_list, xpath_subs_list = self.get_xpath_seq(xpath) + xpath_tags_seq.extend([xpath_tags_list] * len(word_tokens)) + xpath_subs_seq.extend([xpath_subs_list] * len(word_tokens)) + else: + # CASE 2: token classification (training) + for word, xpath, label in zip(text, xpaths, node_labels): + if len(word) < 1: # skip empty nodes + continue + word_tokens = self.tokenize(word) + tokens.extend(word_tokens) + xpath_tags_list, xpath_subs_list = self.get_xpath_seq(xpath) + xpath_tags_seq.extend([xpath_tags_list] * len(word_tokens)) + xpath_subs_seq.extend([xpath_subs_list] * len(word_tokens)) + if self.only_label_first_subword: + # Use the real label id for the first token of the word, and padding ids for the remaining tokens + labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1)) + else: + labels.extend([label] * len(word_tokens)) + else: + # CASE 3: web page question answering (inference) + # text = question + # text_pair = nodes + tokens = self.tokenize(text) + xpath_tags_seq = [self.pad_xpath_tags_seq for _ in range(len(tokens))] + xpath_subs_seq = [self.pad_xpath_subs_seq for _ in range(len(tokens))] + + for word, xpath in zip(text_pair, xpaths): + if len(word) < 1: # skip empty nodes + continue + word_tokens = self.tokenize(word) + pair_tokens.extend(word_tokens) + xpath_tags_list, xpath_subs_list = self.get_xpath_seq(xpath) + pair_xpath_tags_seq.extend([xpath_tags_list] * len(word_tokens)) + pair_xpath_subs_seq.extend([xpath_subs_list] * len(word_tokens)) + + # Create ids + pair_ids + ids = self.convert_tokens_to_ids(tokens) + pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None + + if ( + return_overflowing_tokens + and truncation_strategy == TruncationStrategy.LONGEST_FIRST + and pair_ids is not None + ): + raise ValueError( + "Not possible to return overflowing tokens for pair of sequences with the " + "`longest_first`. Please select another truncation strategy than `longest_first`, " + "for instance `only_second` or `only_first`." + ) + + # Compute the total size of the returned encodings + pair = bool(pair_ids is not None) + len_ids = len(ids) + len_pair_ids = len(pair_ids) if pair else 0 + total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0) + + # Truncation: Handle max sequence length + overflowing_tokens = [] + overflowing_xpath_tags_seq = [] + overflowing_xpath_subs_seq = [] + overflowing_labels = [] + if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length: + ( + ids, + xpath_tags_seq, + xpath_subs_seq, + pair_ids, + pair_xpath_tags_seq, + pair_xpath_subs_seq, + labels, + overflowing_tokens, + overflowing_xpath_tags_seq, + overflowing_xpath_subs_seq, + overflowing_labels, + ) = self.truncate_sequences( + ids, + xpath_tags_seq=xpath_tags_seq, + xpath_subs_seq=xpath_subs_seq, + pair_ids=pair_ids, + pair_xpath_tags_seq=pair_xpath_tags_seq, + pair_xpath_subs_seq=pair_xpath_subs_seq, + labels=labels, + num_tokens_to_remove=total_len - max_length, + truncation_strategy=truncation_strategy, + stride=stride, + ) + + if return_token_type_ids and not add_special_tokens: + raise ValueError( + "Asking to return token_type_ids while setting add_special_tokens to False " + "results in an undefined behavior. Please set add_special_tokens to True or " + "set return_token_type_ids to None." + ) + + # Load from model defaults + if return_token_type_ids is None: + return_token_type_ids = "token_type_ids" in self.model_input_names + if return_attention_mask is None: + return_attention_mask = "attention_mask" in self.model_input_names + + encoded_inputs = {} + + if return_overflowing_tokens: + encoded_inputs["overflowing_tokens"] = overflowing_tokens + encoded_inputs["overflowing_xpath_tags_seq"] = overflowing_xpath_tags_seq + encoded_inputs["overflowing_xpath_subs_seq"] = overflowing_xpath_subs_seq + encoded_inputs["overflowing_labels"] = overflowing_labels + encoded_inputs["num_truncated_tokens"] = total_len - max_length + + # Add special tokens + if add_special_tokens: + sequence = self.build_inputs_with_special_tokens(ids, pair_ids) + token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids) + xpath_tags_ids = self.build_xpath_tags_with_special_tokens(xpath_tags_seq, pair_xpath_tags_seq) + xpath_subs_ids = self.build_xpath_subs_with_special_tokens(xpath_subs_seq, pair_xpath_subs_seq) + if labels: + labels = [self.pad_token_label] + labels + [self.pad_token_label] + else: + sequence = ids + pair_ids if pair else ids + token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else []) + xpath_tags_ids = xpath_tags_seq + pair_xpath_tags_seq if pair else xpath_tags_seq + xpath_subs_ids = xpath_subs_seq + pair_xpath_subs_seq if pair else xpath_subs_seq + + # Build output dictionary + encoded_inputs["input_ids"] = sequence + encoded_inputs["xpath_tags_seq"] = xpath_tags_ids + encoded_inputs["xpath_subs_seq"] = xpath_subs_ids + if return_token_type_ids: + encoded_inputs["token_type_ids"] = token_type_ids + if return_special_tokens_mask: + if add_special_tokens: + encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids) + else: + encoded_inputs["special_tokens_mask"] = [0] * len(sequence) + + if labels: + encoded_inputs["labels"] = labels + + # Check lengths + self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose) + + # Padding + if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask: + encoded_inputs = self.pad( + encoded_inputs, + max_length=max_length, + padding=padding_strategy.value, + pad_to_multiple_of=pad_to_multiple_of, + return_attention_mask=return_attention_mask, + ) + + if return_length: + encoded_inputs["length"] = len(encoded_inputs["input_ids"]) + + batch_outputs = BatchEncoding( + encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis + ) + + return batch_outputs + + def truncate_sequences( + self, + ids: List[int], + xpath_tags_seq: List[List[int]], + xpath_subs_seq: List[List[int]], + pair_ids: Optional[List[int]] = None, + pair_xpath_tags_seq: Optional[List[List[int]]] = None, + pair_xpath_subs_seq: Optional[List[List[int]]] = None, + labels: Optional[List[int]] = None, + num_tokens_to_remove: int = 0, + truncation_strategy: Union[str, TruncationStrategy] = "longest_first", + stride: int = 0, + ) -> Tuple[List[int], List[int], List[int]]: + """ + Args: + Truncates a sequence pair in-place following the strategy. + ids (`List[int]`): + Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and + `convert_tokens_to_ids` methods. + xpath_tags_seq (`List[List[int]]`): + XPath tag IDs of the first sequence. + xpath_subs_seq (`List[List[int]]`): + XPath sub IDs of the first sequence. + pair_ids (`List[int]`, *optional*): + Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize` + and `convert_tokens_to_ids` methods. + pair_xpath_tags_seq (`List[List[int]]`, *optional*): + XPath tag IDs of the second sequence. + pair_xpath_subs_seq (`List[List[int]]`, *optional*): + XPath sub IDs of the second sequence. + num_tokens_to_remove (`int`, *optional*, defaults to 0): + Number of tokens to remove using the truncation strategy. + truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to + `False`): + The strategy to follow for truncation. Can be: + - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the + maximum acceptable input length for the model if that argument is not provided. This will truncate + token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a + batch of pairs) is provided. + - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the + maximum acceptable input length for the model if that argument is not provided. This will only + truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. + - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the + maximum acceptable input length for the model if that argument is not provided. This will only + truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. + - `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater + than the model maximum admissible input size). + stride (`int`, *optional*, defaults to 0): + If set to a positive number, the overflowing tokens returned will contain some tokens from the main + sequence returned. The value of this argument defines the number of additional tokens. + Returns: + `Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of + overflowing tokens. Note: The *longest_first* strategy returns empty list of overflowing tokens if a pair + of sequences (or a batch of pairs) is provided. + """ + if num_tokens_to_remove <= 0: + return ids, xpath_tags_seq, xpath_subs_seq, pair_ids, pair_xpath_tags_seq, pair_xpath_subs_seq, [], [], [] + + if not isinstance(truncation_strategy, TruncationStrategy): + truncation_strategy = TruncationStrategy(truncation_strategy) + + overflowing_tokens = [] + overflowing_xpath_tags_seq = [] + overflowing_xpath_subs_seq = [] + overflowing_labels = [] + if truncation_strategy == TruncationStrategy.ONLY_FIRST or ( + truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None + ): + if len(ids) > num_tokens_to_remove: + window_len = min(len(ids), stride + num_tokens_to_remove) + overflowing_tokens = ids[-window_len:] + overflowing_xpath_tags_seq = xpath_tags_seq[-window_len:] + overflowing_xpath_subs_seq = xpath_subs_seq[-window_len:] + ids = ids[:-num_tokens_to_remove] + xpath_tags_seq = xpath_tags_seq[:-num_tokens_to_remove] + xpath_subs_seq = xpath_subs_seq[:-num_tokens_to_remove] + labels = labels[:-num_tokens_to_remove] + else: + error_msg = ( + f"We need to remove {num_tokens_to_remove} to truncate the input " + f"but the first sequence has a length {len(ids)}. " + ) + if truncation_strategy == TruncationStrategy.ONLY_FIRST: + error_msg = ( + error_msg + + "Please select another truncation strategy than " + f"{truncation_strategy}, for instance 'longest_first' or 'only_second'." + ) + logger.error(error_msg) + elif truncation_strategy == TruncationStrategy.LONGEST_FIRST: + logger.warning( + "Be aware, overflowing tokens are not returned for the setting you have chosen," + f" i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' " + "truncation strategy. So the returned list will always be empty even if some " + "tokens have been removed." + ) + for _ in range(num_tokens_to_remove): + if pair_ids is None or len(ids) > len(pair_ids): + ids = ids[:-1] + xpath_tags_seq = xpath_tags_seq[:-1] + xpath_subs_seq = xpath_subs_seq[:-1] + labels = labels[:-1] + else: + pair_ids = pair_ids[:-1] + pair_xpath_tags_seq = pair_xpath_tags_seq[:-1] + pair_xpath_subs_seq = pair_xpath_subs_seq[:-1] + elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None: + if len(pair_ids) > num_tokens_to_remove: + window_len = min(len(pair_ids), stride + num_tokens_to_remove) + overflowing_tokens = pair_ids[-window_len:] + overflowing_xpath_tags_seq = pair_xpath_tags_seq[-window_len:] + overflowing_xpath_subs_seq = pair_xpath_subs_seq[-window_len:] + pair_ids = pair_ids[:-num_tokens_to_remove] + pair_xpath_tags_seq = pair_xpath_tags_seq[:-num_tokens_to_remove] + pair_xpath_subs_seq = pair_xpath_subs_seq[:-num_tokens_to_remove] + else: + logger.error( + f"We need to remove {num_tokens_to_remove} to truncate the input " + f"but the second sequence has a length {len(pair_ids)}. " + f"Please select another truncation strategy than {truncation_strategy}, " + "for instance 'longest_first' or 'only_first'." + ) + + return ( + ids, + xpath_tags_seq, + xpath_subs_seq, + pair_ids, + pair_xpath_tags_seq, + pair_xpath_subs_seq, + labels, + overflowing_tokens, + overflowing_xpath_tags_seq, + overflowing_xpath_subs_seq, + overflowing_labels, + ) + + def _pad( + self, + encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], + max_length: Optional[int] = None, + padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, + pad_to_multiple_of: Optional[int] = None, + return_attention_mask: Optional[bool] = None, + ) -> dict: + """ + Args: + Pad encoded inputs (on left/right and up to predefined length or max length in the batch) + encoded_inputs: + Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). + max_length: maximum length of the returned list and optionally padding length (see below). + Will truncate by taking into account the special tokens. + padding_strategy: PaddingStrategy to use for padding. + - PaddingStrategy.LONGEST Pad to the longest sequence in the batch + - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) + - PaddingStrategy.DO_NOT_PAD: Do not pad + The tokenizer padding sides are defined in self.padding_side: + - 'left': pads on the left of the sequences + - 'right': pads on the right of the sequences + pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. + This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability + >= 7.5 (Volta). + return_attention_mask: + (optional) Set to False to avoid returning attention mask (default: set to model specifics) + """ + # Load from model defaults + if return_attention_mask is None: + return_attention_mask = "attention_mask" in self.model_input_names + + required_input = encoded_inputs[self.model_input_names[0]] + + if padding_strategy == PaddingStrategy.LONGEST: + max_length = len(required_input) + + if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): + max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of + + needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length + + # Initialize attention mask if not present. + if return_attention_mask and "attention_mask" not in encoded_inputs: + encoded_inputs["attention_mask"] = [1] * len(required_input) + + if needs_to_be_padded: + difference = max_length - len(required_input) + if self.padding_side == "right": + if return_attention_mask: + encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference + if "token_type_ids" in encoded_inputs: + encoded_inputs["token_type_ids"] = ( + encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference + ) + if "xpath_tags_seq" in encoded_inputs: + encoded_inputs["xpath_tags_seq"] = ( + encoded_inputs["xpath_tags_seq"] + [self.pad_xpath_tags_seq] * difference + ) + if "xpath_subs_seq" in encoded_inputs: + encoded_inputs["xpath_subs_seq"] = ( + encoded_inputs["xpath_subs_seq"] + [self.pad_xpath_subs_seq] * difference + ) + if "labels" in encoded_inputs: + encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference + if "special_tokens_mask" in encoded_inputs: + encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference + encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference + elif self.padding_side == "left": + if return_attention_mask: + encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] + if "token_type_ids" in encoded_inputs: + encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[ + "token_type_ids" + ] + if "xpath_tags_seq" in encoded_inputs: + encoded_inputs["xpath_tags_seq"] = [self.pad_xpath_tags_seq] * difference + encoded_inputs[ + "xpath_tags_seq" + ] + if "xpath_subs_seq" in encoded_inputs: + encoded_inputs["xpath_subs_seq"] = [self.pad_xpath_subs_seq] * difference + encoded_inputs[ + "xpath_subs_seq" + ] + if "labels" in encoded_inputs: + encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"] + if "special_tokens_mask" in encoded_inputs: + encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] + encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input + else: + raise ValueError("Invalid padding strategy:" + str(self.padding_side)) + + return encoded_inputs diff --git a/src/transformers/models/markuplm/tokenization_markuplm_fast.py b/src/transformers/models/markuplm/tokenization_markuplm_fast.py new file mode 100644 index 00000000000000..5e76f4d0bc181b --- /dev/null +++ b/src/transformers/models/markuplm/tokenization_markuplm_fast.py @@ -0,0 +1,924 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Fast tokenization class for MarkupLM. It overwrites 2 methods of the slow tokenizer class, namely _batch_encode_plus +and _encode_plus, in which the Rust tokenizer is used. +""" + +import json +from functools import lru_cache +from typing import Dict, List, Optional, Tuple, Union + +from tokenizers import pre_tokenizers, processors + +from ...file_utils import PaddingStrategy, TensorType, add_end_docstrings +from ...tokenization_utils_base import ( + ENCODE_KWARGS_DOCSTRING, + BatchEncoding, + EncodedInput, + PreTokenizedInput, + TextInput, + TextInputPair, + TruncationStrategy, +) +from ...tokenization_utils_fast import PreTrainedTokenizerFast +from ...utils import logging +from .tokenization_markuplm import MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING, MarkupLMTokenizer + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} + +PRETRAINED_VOCAB_FILES_MAP = { + "vocab_file": { + "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/vocab.json", + "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/vocab.json", + }, + "merges_file": { + "microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/merges.txt", + "microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/merges.txt", + }, +} + + +PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { + "microsoft/markuplm-base": 512, + "microsoft/markuplm-large": 512, +} + + +@lru_cache() +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control + characters the bpe code barfs on. The reversible bpe codes work on unicode strings. This means you need a large # + of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset + you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe + vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. + """ + bs = ( + list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) + ) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8 + n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def get_pairs(word): + """ + Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length + strings). + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +class MarkupLMTokenizerFast(PreTrainedTokenizerFast): + r""" + Construct a MarkupLM tokenizer. Based on byte-level Byte-Pair-Encoding (BPE). + + [`MarkupLMTokenizerFast`] can be used to turn HTML strings into to token-level `input_ids`, `attention_mask`, + `token_type_ids`, `xpath_tags_seq` and `xpath_tags_seq`. This tokenizer inherits from [`PreTrainedTokenizer`] which + contains most of the main methods. + + Users should refer to this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + Path to the vocabulary file. + merges_file (`str`): + Path to the merges file. + errors (`str`, *optional*, defaults to `"replace"`): + Paradigm to follow when decoding bytes to UTF-8. See + [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. + bos_token (`str`, *optional*, defaults to `""`): + The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. + + + + When building a sequence using special tokens, this is not the token that is used for the beginning of + sequence. The token used is the `cls_token`. + + + + eos_token (`str`, *optional*, defaults to `""`): + The end of sequence token. + + + + When building a sequence using special tokens, this is not the token that is used for the end of sequence. + The token used is the `sep_token`. + + + + sep_token (`str`, *optional*, defaults to `""`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + cls_token (`str`, *optional*, defaults to `""`): + The classifier token which is used when doing sequence classification (classification of the whole sequence + instead of per-token classification). It is the first token of the sequence when built with special tokens. + unk_token (`str`, *optional*, defaults to `""`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + pad_token (`str`, *optional*, defaults to `""`): + The token used for padding, for example when batching sequences of different lengths. + mask_token (`str`, *optional*, defaults to `""`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. + add_prefix_space (`bool`, *optional*, defaults to `False`): + Whether or not to add an initial space to the input. This allows to treat the leading word just as any + other word. (RoBERTa tokenizer detect beginning of words by the preceding space). + """ + + vocab_files_names = VOCAB_FILES_NAMES + pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + slow_tokenizer_class = MarkupLMTokenizer + + def __init__( + self, + vocab_file, + merges_file, + tags_dict, + tokenizer_file=None, + errors="replace", + bos_token="", + eos_token="", + sep_token="", + cls_token="", + unk_token="", + pad_token="", + mask_token="", + add_prefix_space=False, + max_depth=50, + max_width=1000, + pad_width=1001, + pad_token_label=-100, + only_label_first_subword=True, + trim_offsets=False, + **kwargs + ): + super().__init__( + vocab_file=vocab_file, + merges_file=merges_file, + tags_dict=tags_dict, + tokenizer_file=tokenizer_file, + errors=errors, + bos_token=bos_token, + eos_token=eos_token, + unk_token=unk_token, + sep_token=sep_token, + cls_token=cls_token, + pad_token=pad_token, + mask_token=mask_token, + add_prefix_space=add_prefix_space, + trim_offsets=trim_offsets, + max_depth=max_depth, + max_width=max_width, + pad_width=pad_width, + pad_token_label=pad_token_label, + only_label_first_subword=only_label_first_subword, + **kwargs, + ) + if trim_offsets: + # Not implemented yet, because we need to chain two post processors which is not possible yet + # We need to wait for https://github.com/huggingface/tokenizers/pull/1005 + # With `trim_offsets=False` we don't need to do add `processors.ByteLevel(trim_offsets=False)` + # because it's not doing anything + raise NotImplementedError( + "`trim_offsets=True` is not implemented for MarkupLMTokenizerFast. Please set it to False." + ) + + self.tags_dict = tags_dict + + pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) + if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space: + pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type")) + pre_tok_state["add_prefix_space"] = add_prefix_space + self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state) + + self.add_prefix_space = add_prefix_space + + tokenizer_component = "post_processor" + tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None) + if tokenizer_component_instance: + state = json.loads(tokenizer_component_instance.__getstate__()) + + # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` + if "sep" in state: + state["sep"] = tuple(state["sep"]) + if "cls" in state: + state["cls"] = tuple(state["cls"]) + + changes_to_apply = False + + if state.get("add_prefix_space", add_prefix_space) != add_prefix_space: + state["add_prefix_space"] = add_prefix_space + changes_to_apply = True + + if changes_to_apply: + component_class = getattr(processors, state.pop("type")) + new_value = component_class(**state) + setattr(self.backend_tokenizer, tokenizer_component, new_value) + + # additional properties + self.max_depth = max_depth + self.max_width = max_width + self.pad_width = pad_width + self.unk_tag_id = len(self.tags_dict) + self.pad_tag_id = self.unk_tag_id + 1 + self.pad_xpath_tags_seq = [self.pad_tag_id] * self.max_depth + self.pad_xpath_subs_seq = [self.pad_width] * self.max_depth + self.pad_token_label = pad_token_label + self.only_label_first_subword = only_label_first_subword + + def get_xpath_seq(self, xpath): + """ + Given the xpath expression of one particular node (like "/html/body/div/li[1]/div/span[2]"), return a list of + tag IDs and corresponding subscripts, taking into account max depth. + """ + xpath_tags_list = [] + xpath_subs_list = [] + + xpath_units = xpath.split("/") + for unit in xpath_units: + if not unit.strip(): + continue + name_subs = unit.strip().split("[") + tag_name = name_subs[0] + sub = 0 if len(name_subs) == 1 else int(name_subs[1][:-1]) + xpath_tags_list.append(self.tags_dict.get(tag_name, self.unk_tag_id)) + xpath_subs_list.append(min(self.max_width, sub)) + + xpath_tags_list = xpath_tags_list[: self.max_depth] + xpath_subs_list = xpath_tags_list[: self.max_depth] + xpath_tags_list += [self.pad_tag_id] * (self.max_depth - len(xpath_tags_list)) + xpath_subs_list += [self.pad_width] * (self.max_depth - len(xpath_subs_list)) + + return xpath_tags_list, xpath_subs_list + + @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) + def __call__( + self, + text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], + text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, + xpaths: Union[List[List[int]], List[List[List[int]]]] = None, + node_labels: Optional[Union[List[int], List[List[int]]]] = None, + add_special_tokens: bool = True, + padding: Union[bool, str, PaddingStrategy] = False, + truncation: Union[bool, str, TruncationStrategy] = False, + max_length: Optional[int] = None, + stride: int = 0, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + **kwargs + ) -> BatchEncoding: + """ + Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of + sequences with nodes, xpaths and optional labels. + + Args: + text (`str`, `List[str]`, `List[List[str]]`): + The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings + (words of a single example or questions of a batch of examples) or a list of list of strings (batch of + words). + text_pair (`List[str]`, `List[List[str]]`): + The sequence or batch of sequences to be encoded. Each sequence should be a list of strings + (pretokenized string). + xpaths (`List[List[int]]`, `List[List[List[int]]]`): + Node-level xpaths. Each bounding box should be normalized to be on a 0-1000 scale. + node_labels (`List[int]`, `List[List[int]]`, *optional*): + Node-level integer labels (for token classification tasks). + """ + # Input type checking for clearer error + def _is_valid_text_input(t): + if isinstance(t, str): + # Strings are fine + return True + elif isinstance(t, (list, tuple)): + # List are fine as long as they are... + if len(t) == 0: + # ... empty + return True + elif isinstance(t[0], str): + # ... list of strings + return True + elif isinstance(t[0], (list, tuple)): + # ... list with an empty list or with a list of strings + return len(t[0]) == 0 or isinstance(t[0][0], str) + else: + return False + else: + return False + + if text_pair is not None: + # in case text + text_pair are provided, text = questions, text_pair = nodes + if not _is_valid_text_input(text): + raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ") + if not isinstance(text_pair, (list, tuple)): + raise ValueError( + "Nodes must be of type `List[str]` (single pretokenized example), " + "or `List[List[str]]` (batch of pretokenized examples)." + ) + else: + # in case only text is provided => must be nodes + if not isinstance(text, (list, tuple)): + raise ValueError( + "Nodes must be of type `List[str]` (single pretokenized example), " + "or `List[List[str]]` (batch of pretokenized examples)." + ) + + if text_pair is not None: + is_batched = isinstance(text, (list, tuple)) + else: + is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)) + + nodes = text if text_pair is None else text_pair + assert xpaths is not None, "You must provide corresponding xpaths" + if is_batched: + assert len(nodes) == len(xpaths), "You must provide nodes and xpaths for an equal amount of examples" + for nodes_example, xpaths_example in zip(nodes, xpaths): + assert len(nodes_example) == len(xpaths_example), "You must provide as many nodes as there are xpaths" + else: + assert len(nodes) == len(xpaths), "You must provide as many nodes as there are xpaths" + + if is_batched: + if text_pair is not None and len(text) != len(text_pair): + raise ValueError( + f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:" + f" {len(text_pair)}." + ) + batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text + is_pair = bool(text_pair is not None) + return self.batch_encode_plus( + batch_text_or_text_pairs=batch_text_or_text_pairs, + is_pair=is_pair, + xpaths=xpaths, + node_labels=node_labels, + add_special_tokens=add_special_tokens, + padding=padding, + truncation=truncation, + max_length=max_length, + stride=stride, + pad_to_multiple_of=pad_to_multiple_of, + return_tensors=return_tensors, + return_token_type_ids=return_token_type_ids, + return_attention_mask=return_attention_mask, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_offsets_mapping=return_offsets_mapping, + return_length=return_length, + verbose=verbose, + **kwargs, + ) + else: + return self.encode_plus( + text=text, + text_pair=text_pair, + xpaths=xpaths, + node_labels=node_labels, + add_special_tokens=add_special_tokens, + padding=padding, + truncation=truncation, + max_length=max_length, + stride=stride, + pad_to_multiple_of=pad_to_multiple_of, + return_tensors=return_tensors, + return_token_type_ids=return_token_type_ids, + return_attention_mask=return_attention_mask, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_offsets_mapping=return_offsets_mapping, + return_length=return_length, + verbose=verbose, + **kwargs, + ) + + @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) + def batch_encode_plus( + self, + batch_text_or_text_pairs: Union[ + List[TextInput], + List[TextInputPair], + List[PreTokenizedInput], + ], + is_pair: bool = None, + xpaths: Optional[List[List[List[int]]]] = None, + node_labels: Optional[Union[List[int], List[List[int]]]] = None, + add_special_tokens: bool = True, + padding: Union[bool, str, PaddingStrategy] = False, + truncation: Union[bool, str, TruncationStrategy] = False, + max_length: Optional[int] = None, + stride: int = 0, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + **kwargs + ) -> BatchEncoding: + # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' + padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( + padding=padding, + truncation=truncation, + max_length=max_length, + pad_to_multiple_of=pad_to_multiple_of, + verbose=verbose, + **kwargs, + ) + + return self._batch_encode_plus( + batch_text_or_text_pairs=batch_text_or_text_pairs, + is_pair=is_pair, + xpaths=xpaths, + node_labels=node_labels, + add_special_tokens=add_special_tokens, + padding_strategy=padding_strategy, + truncation_strategy=truncation_strategy, + max_length=max_length, + stride=stride, + pad_to_multiple_of=pad_to_multiple_of, + return_tensors=return_tensors, + return_token_type_ids=return_token_type_ids, + return_attention_mask=return_attention_mask, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_offsets_mapping=return_offsets_mapping, + return_length=return_length, + verbose=verbose, + **kwargs, + ) + + def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]: + batched_input = [(text, pair)] if pair else [text] + encodings = self._tokenizer.encode_batch( + batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs + ) + + return encodings[0].tokens + + @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, MARKUPLM_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING) + def encode_plus( + self, + text: Union[TextInput, PreTokenizedInput], + text_pair: Optional[PreTokenizedInput] = None, + xpaths: Optional[List[List[int]]] = None, + node_labels: Optional[List[int]] = None, + add_special_tokens: bool = True, + padding: Union[bool, str, PaddingStrategy] = False, + truncation: Union[bool, str, TruncationStrategy] = False, + max_length: Optional[int] = None, + stride: int = 0, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + **kwargs + ) -> BatchEncoding: + """ + Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated, + `__call__` should be used instead. + + Args: + text (`str`, `List[str]`, `List[List[str]]`): + The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings. + text_pair (`List[str]` or `List[int]`, *optional*): + Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a + list of list of strings (words of a batch of examples). + """ + + # Backward compatibility for 'truncation_strategy', 'pad_to_max_length' + padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies( + padding=padding, + truncation=truncation, + max_length=max_length, + pad_to_multiple_of=pad_to_multiple_of, + verbose=verbose, + **kwargs, + ) + + return self._encode_plus( + text=text, + xpaths=xpaths, + text_pair=text_pair, + node_labels=node_labels, + add_special_tokens=add_special_tokens, + padding_strategy=padding_strategy, + truncation_strategy=truncation_strategy, + max_length=max_length, + stride=stride, + pad_to_multiple_of=pad_to_multiple_of, + return_tensors=return_tensors, + return_token_type_ids=return_token_type_ids, + return_attention_mask=return_attention_mask, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_offsets_mapping=return_offsets_mapping, + return_length=return_length, + verbose=verbose, + **kwargs, + ) + + def _batch_encode_plus( + self, + batch_text_or_text_pairs: Union[ + List[TextInput], + List[TextInputPair], + List[PreTokenizedInput], + ], + is_pair: bool = None, + xpaths: Optional[List[List[List[int]]]] = None, + node_labels: Optional[List[List[int]]] = None, + add_special_tokens: bool = True, + padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, + truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, + max_length: Optional[int] = None, + stride: int = 0, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[str] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + ) -> BatchEncoding: + if not isinstance(batch_text_or_text_pairs, list): + raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})") + + # Set the truncation and padding strategy and restore the initial configuration + self.set_truncation_and_padding( + padding_strategy=padding_strategy, + truncation_strategy=truncation_strategy, + max_length=max_length, + stride=stride, + pad_to_multiple_of=pad_to_multiple_of, + ) + + if is_pair: + batch_text_or_text_pairs = [([text], text_pair) for text, text_pair in batch_text_or_text_pairs] + + encodings = self._tokenizer.encode_batch( + batch_text_or_text_pairs, + add_special_tokens=add_special_tokens, + is_pretokenized=True, # we set this to True as MarkupLM always expects pretokenized inputs + ) + + # Convert encoding to dict + # `Tokens` is a tuple of (List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]], + # List[EncodingFast]) with nested dimensions corresponding to batch, overflows, sequence length + tokens_and_encodings = [ + self._convert_encoding( + encoding=encoding, + return_token_type_ids=return_token_type_ids, + return_attention_mask=return_attention_mask, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_offsets_mapping=True + if node_labels is not None + else return_offsets_mapping, # we use offsets to create the labels + return_length=return_length, + verbose=verbose, + ) + for encoding in encodings + ] + + # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension + # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length) + # (we say ~ because the number of overflow varies with the example in the batch) + # + # To match each overflowing sample with the original sample in the batch + # we add an overflow_to_sample_mapping array (see below) + sanitized_tokens = {} + for key in tokens_and_encodings[0][0].keys(): + stack = [e for item, _ in tokens_and_encodings for e in item[key]] + sanitized_tokens[key] = stack + sanitized_encodings = [e for _, item in tokens_and_encodings for e in item] + + # If returning overflowing tokens, we need to return a mapping + # from the batch idx to the original sample + if return_overflowing_tokens: + overflow_to_sample_mapping = [] + for i, (toks, _) in enumerate(tokens_and_encodings): + overflow_to_sample_mapping += [i] * len(toks["input_ids"]) + sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping + + for input_ids in sanitized_tokens["input_ids"]: + self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose) + + # create the token-level xpaths tags and subscripts + xpath_tags_seq = [] + xpath_subs_seq = [] + for batch_index in range(len(sanitized_tokens["input_ids"])): + if return_overflowing_tokens: + original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index] + else: + original_index = batch_index + xpath_tags_seq_example = [] + xpath_subs_seq_example = [] + for id, sequence_id, word_id in zip( + sanitized_tokens["input_ids"][batch_index], + sanitized_encodings[batch_index].sequence_ids, + sanitized_encodings[batch_index].word_ids, + ): + if word_id is not None: + if is_pair and sequence_id == 0: + xpath_tags_seq_example.append(self.pad_xpath_tags_seq) + xpath_subs_seq_example.append(self.pad_xpath_subs_seq) + else: + xpath_tags_list, xpath_subs_list = self.get_xpath_seq(xpaths[original_index][word_id]) + xpath_tags_seq_example.extend([xpath_tags_list]) + xpath_subs_seq_example.extend([xpath_subs_list]) + else: + if id in [self.cls_token_id, self.sep_token_id, self.pad_token_id]: + xpath_tags_seq_example.append(self.pad_xpath_tags_seq) + xpath_subs_seq_example.append(self.pad_xpath_subs_seq) + else: + raise ValueError("Id not recognized") + xpath_tags_seq.append(xpath_tags_seq_example) + xpath_subs_seq.append(xpath_subs_seq_example) + + sanitized_tokens["xpath_tags_seq"] = xpath_tags_seq + sanitized_tokens["xpath_subs_seq"] = xpath_subs_seq + + # optionally, create the labels + if node_labels is not None: + labels = [] + for batch_index in range(len(sanitized_tokens["input_ids"])): + if return_overflowing_tokens: + original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index] + else: + original_index = batch_index + labels_example = [] + for id, offset, word_id in zip( + sanitized_tokens["input_ids"][batch_index], + sanitized_tokens["offset_mapping"][batch_index], + sanitized_encodings[batch_index].word_ids, + ): + if word_id is not None: + if self.only_label_first_subword: + if offset[0] == 0: + # Use the real label id for the first token of the word, and padding ids for the remaining tokens + labels_example.append(node_labels[original_index][word_id]) + else: + labels_example.append(self.pad_token_label) + else: + labels_example.append(node_labels[original_index][word_id]) + else: + labels_example.append(self.pad_token_label) + labels.append(labels_example) + + sanitized_tokens["labels"] = labels + # finally, remove offsets if the user didn't want them + if not return_offsets_mapping: + del sanitized_tokens["offset_mapping"] + + return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors) + + def _encode_plus( + self, + text: Union[TextInput, PreTokenizedInput], + text_pair: Optional[PreTokenizedInput] = None, + xpaths: Optional[List[List[int]]] = None, + node_labels: Optional[List[int]] = None, + add_special_tokens: bool = True, + padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, + truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, + max_length: Optional[int] = None, + stride: int = 0, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[bool] = None, + return_token_type_ids: Optional[bool] = None, + return_attention_mask: Optional[bool] = None, + return_overflowing_tokens: bool = False, + return_special_tokens_mask: bool = False, + return_offsets_mapping: bool = False, + return_length: bool = False, + verbose: bool = True, + **kwargs + ) -> BatchEncoding: + # make it a batched input + # 2 options: + # 1) only text, in case text must be a list of str + # 2) text + text_pair, in which case text = str and text_pair a list of str + batched_input = [(text, text_pair)] if text_pair else [text] + batched_xpaths = [xpaths] + batched_node_labels = [node_labels] if node_labels is not None else None + batched_output = self._batch_encode_plus( + batched_input, + is_pair=bool(text_pair is not None), + xpaths=batched_xpaths, + node_labels=batched_node_labels, + add_special_tokens=add_special_tokens, + padding_strategy=padding_strategy, + truncation_strategy=truncation_strategy, + max_length=max_length, + stride=stride, + pad_to_multiple_of=pad_to_multiple_of, + return_tensors=return_tensors, + return_token_type_ids=return_token_type_ids, + return_attention_mask=return_attention_mask, + return_overflowing_tokens=return_overflowing_tokens, + return_special_tokens_mask=return_special_tokens_mask, + return_offsets_mapping=return_offsets_mapping, + return_length=return_length, + verbose=verbose, + **kwargs, + ) + + # Return tensor is None, then we can remove the leading batch axis + # Overflowing tokens are returned as a batch of output so we keep them in this case + if return_tensors is None and not return_overflowing_tokens: + batched_output = BatchEncoding( + { + key: value[0] if len(value) > 0 and isinstance(value[0], list) else value + for key, value in batched_output.items() + }, + batched_output.encodings, + ) + + self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose) + + return batched_output + + def _pad( + self, + encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], + max_length: Optional[int] = None, + padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, + pad_to_multiple_of: Optional[int] = None, + return_attention_mask: Optional[bool] = None, + ) -> dict: + """ + Args: + Pad encoded inputs (on left/right and up to predefined length or max length in the batch) + encoded_inputs: + Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). + max_length: maximum length of the returned list and optionally padding length (see below). + Will truncate by taking into account the special tokens. + padding_strategy: PaddingStrategy to use for padding. + - PaddingStrategy.LONGEST Pad to the longest sequence in the batch + - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) + - PaddingStrategy.DO_NOT_PAD: Do not pad + The tokenizer padding sides are defined in self.padding_side: + - 'left': pads on the left of the sequences + - 'right': pads on the right of the sequences + pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. + This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability + >= 7.5 (Volta). + return_attention_mask: + (optional) Set to False to avoid returning attention mask (default: set to model specifics) + """ + # Load from model defaults + if return_attention_mask is None: + return_attention_mask = "attention_mask" in self.model_input_names + + required_input = encoded_inputs[self.model_input_names[0]] + + if padding_strategy == PaddingStrategy.LONGEST: + max_length = len(required_input) + + if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): + max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of + + needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length + + # Initialize attention mask if not present. + if return_attention_mask and "attention_mask" not in encoded_inputs: + encoded_inputs["attention_mask"] = [1] * len(required_input) + + if needs_to_be_padded: + difference = max_length - len(required_input) + if self.padding_side == "right": + if return_attention_mask: + encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference + if "token_type_ids" in encoded_inputs: + encoded_inputs["token_type_ids"] = ( + encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference + ) + if "xpath_tags_seq" in encoded_inputs: + encoded_inputs["xpath_tags_seq"] = ( + encoded_inputs["xpath_tags_seq"] + [self.pad_xpath_tags_seq] * difference + ) + if "xpath_subs_seq" in encoded_inputs: + encoded_inputs["xpath_subs_seq"] = ( + encoded_inputs["xpath_subs_seq"] + [self.pad_xpath_subs_seq] * difference + ) + if "labels" in encoded_inputs: + encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference + if "special_tokens_mask" in encoded_inputs: + encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference + encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference + elif self.padding_side == "left": + if return_attention_mask: + encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] + if "token_type_ids" in encoded_inputs: + encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[ + "token_type_ids" + ] + if "xpath_tags_seq" in encoded_inputs: + encoded_inputs["xpath_tags_seq"] = [self.pad_xpath_tags_seq] * difference + encoded_inputs[ + "xpath_tags_seq" + ] + if "xpath_subs_seq" in encoded_inputs: + encoded_inputs["xpath_subs_seq"] = [self.pad_xpath_subs_seq] * difference + encoded_inputs[ + "xpath_subs_seq" + ] + if "labels" in encoded_inputs: + encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"] + if "special_tokens_mask" in encoded_inputs: + encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] + encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input + else: + raise ValueError("Invalid padding strategy:" + str(self.padding_side)) + + return encoded_inputs + + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A RoBERTa sequence has the following format: + - single sequence: ` X ` + - pair of sequences: ` A B ` + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. + """ + if token_ids_1 is None: + return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + cls = [self.cls_token_id] + sep = [self.sep_token_id] + return cls + token_ids_0 + sep + token_ids_1 + sep + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does not + make use of token type ids, therefore a list of zeros is returned. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + Returns: + `List[int]`: List of zeros. + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep + token_ids_1 + sep) * [0] + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + files = self._tokenizer.model.save(save_directory, name=filename_prefix) + return tuple(files) diff --git a/src/transformers/testing_utils.py b/src/transformers/testing_utils.py index b14ed5d589c593..65c15fbd967ba0 100644 --- a/src/transformers/testing_utils.py +++ b/src/transformers/testing_utils.py @@ -46,6 +46,7 @@ is_accelerate_available, is_apex_available, is_bitsandbytes_available, + is_bs4_available, is_detectron2_available, is_faiss_available, is_flax_available, @@ -239,6 +240,13 @@ def custom_tokenizers(test_case): return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case) +def require_bs4(test_case): + """ + Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed. + """ + return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case) + + def require_git_lfs(test_case): """ Decorator marking a test that requires git-lfs. diff --git a/src/transformers/utils/__init__.py b/src/transformers/utils/__init__.py index 9572a673f67181..7f3f704ac4a38b 100644 --- a/src/transformers/utils/__init__.py +++ b/src/transformers/utils/__init__.py @@ -89,6 +89,7 @@ is_accelerate_available, is_apex_available, is_bitsandbytes_available, + is_bs4_available, is_coloredlogs_available, is_datasets_available, is_detectron2_available, diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index e9f1bae358f3ac..d564c08e9fc5cc 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -3020,6 +3020,44 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class MarkupLMForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MarkupLMForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MarkupLMForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MarkupLMModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class MarkupLMPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/src/transformers/utils/dummy_tokenizers_objects.py b/src/transformers/utils/dummy_tokenizers_objects.py index 7a469bdff36126..8a24d9bea6b2c1 100644 --- a/src/transformers/utils/dummy_tokenizers_objects.py +++ b/src/transformers/utils/dummy_tokenizers_objects.py @@ -234,6 +234,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tokenizers"]) +class MarkupLMTokenizerFast(metaclass=DummyObject): + _backends = ["tokenizers"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tokenizers"]) + + class MBartTokenizerFast(metaclass=DummyObject): _backends = ["tokenizers"] diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index f2cf5ffd9bff41..16616e0772dd97 100644 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -386,6 +386,10 @@ def is_torch_fx_available(): return _torch_fx_available +def is_bs4_available(): + return importlib.util.find_spec("bs4") is not None + + def is_torch_onnx_dict_inputs_support_available(): return _torch_onnx_dict_inputs_support_available @@ -748,6 +752,12 @@ def is_ccl_available(): installation page https://www.tensorflow.org/install that match your environment. """ +# docstyle-ignore +BS4_IMPORT_ERROR = """ +{0} requires the Beautiful Soup library but it was not found in your environment. You can install it with pip: +`pip install beautifulsoup4` +""" + # docstyle-ignore SKLEARN_IMPORT_ERROR = """ @@ -889,6 +899,7 @@ def is_ccl_available(): BACKENDS_MAPPING = OrderedDict( [ + ("bs4", (is_bs4_available, BS4_IMPORT_ERROR)), ("datasets", (is_datasets_available, DATASETS_IMPORT_ERROR)), ("detectron2", (is_detectron2_available, DETECTRON2_IMPORT_ERROR)), ("faiss", (is_faiss_available, FAISS_IMPORT_ERROR)), diff --git a/tests/models/markuplm/__init__.py b/tests/models/markuplm/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/tests/models/markuplm/test_feature_extraction_markuplm.py b/tests/models/markuplm/test_feature_extraction_markuplm.py new file mode 100644 index 00000000000000..4541cb9480bbe8 --- /dev/null +++ b/tests/models/markuplm/test_feature_extraction_markuplm.py @@ -0,0 +1,114 @@ +# coding=utf-8 +# Copyright 2022 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import unittest + +from transformers.testing_utils import require_bs4 +from transformers.utils import is_bs4_available + +from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin + + +if is_bs4_available(): + from transformers import MarkupLMFeatureExtractor + + +class MarkupLMFeatureExtractionTester(unittest.TestCase): + def __init__(self, parent): + self.parent = parent + + def prepare_feat_extract_dict(self): + return {} + + +def get_html_strings(): + html_string_1 = """ + + + sample document + + + +
+ Goog +

This is one header

+

This is a another Header

+

Travel from +

+ SFO to JFK +
+ on May 2, 2015 at 2:00 pm. For details go to confirm.com +


+
+

Traveler name is +

John Doe

+

""" + + html_string_2 = """ + + + + +

My First Heading

+

My first paragraph.

+ + + + """ + + return [html_string_1, html_string_2] + + +@require_bs4 +class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase): + feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None + + def setUp(self): + self.feature_extract_tester = MarkupLMFeatureExtractionTester(self) + + @property + def feat_extract_dict(self): + return self.feature_extract_tester.prepare_feat_extract_dict() + + def test_call(self): + # Initialize feature_extractor + feature_extractor = self.feature_extraction_class() + + # Test not batched input + html_string = get_html_strings()[0] + encoding = feature_extractor(html_string) + + # fmt: off + expected_nodes = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']] + expected_xpaths = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']] + # fmt: on + + self.assertEqual(encoding.nodes, expected_nodes) + self.assertEqual(encoding.xpaths, expected_xpaths) + + # Test batched + html_strings = get_html_strings() + encoding = feature_extractor(html_strings) + + # fmt: off + expected_nodes = expected_nodes + [['My First Heading', 'My first paragraph.']] + expected_xpaths = expected_xpaths + [['/html/body/h1', '/html/body/p']] + + self.assertEqual(len(encoding.nodes), 2) + self.assertEqual(len(encoding.xpaths), 2) + + self.assertEqual(encoding.nodes, expected_nodes) + self.assertEqual(encoding.xpaths, expected_xpaths) diff --git a/tests/models/markuplm/test_modeling_markuplm.py b/tests/models/markuplm/test_modeling_markuplm.py new file mode 100644 index 00000000000000..8fa1bb440a55f4 --- /dev/null +++ b/tests/models/markuplm/test_modeling_markuplm.py @@ -0,0 +1,364 @@ +# coding=utf-8 +# Copyright 2022 The Hugging Face Team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import unittest + +from transformers import MarkupLMConfig, is_torch_available +from transformers.testing_utils import require_torch, slow, torch_device +from transformers.utils import cached_property + +from ...test_configuration_common import ConfigTester +from ...test_modeling_common import ModelTesterMixin, ids_tensor + + +if is_torch_available(): + import torch + + from transformers import ( + MarkupLMForQuestionAnswering, + MarkupLMForSequenceClassification, + MarkupLMForTokenClassification, + MarkupLMModel, + ) + +# TODO check dependencies +from transformers import MarkupLMFeatureExtractor, MarkupLMProcessor, MarkupLMTokenizer + + +class MarkupLMModelTester: + """You can also import this e.g from .test_modeling_markuplm import MarkupLMModelTester""" + + def __init__( + self, + parent, + batch_size=13, + seq_length=7, + is_training=True, + use_input_mask=True, + use_token_type_ids=True, + use_labels=True, + vocab_size=99, + hidden_size=32, + num_hidden_layers=5, + num_attention_heads=4, + intermediate_size=37, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=16, + type_sequence_label_size=2, + initializer_range=0.02, + num_labels=3, + scope=None, + max_xpath_tag_unit_embeddings=20, + max_xpath_subs_unit_embeddings=30, + tag_pad_id=2, + subs_pad_id=2, + max_depth=10, + ): + self.parent = parent + self.batch_size = batch_size + self.seq_length = seq_length + self.is_training = is_training + self.use_input_mask = use_input_mask + self.use_token_type_ids = use_token_type_ids + self.use_labels = use_labels + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.type_sequence_label_size = type_sequence_label_size + self.initializer_range = initializer_range + self.num_labels = num_labels + self.scope = scope + self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings + self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings + self.tag_pad_id = tag_pad_id + self.subs_pad_id = subs_pad_id + self.max_depth = max_depth + + def prepare_config_and_inputs(self): + input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) + + xpath_tags_seq = ids_tensor( + [self.batch_size, self.seq_length, self.max_depth], self.max_xpath_tag_unit_embeddings + ) + + xpath_subs_seq = ids_tensor( + [self.batch_size, self.seq_length, self.max_depth], self.max_xpath_subs_unit_embeddings + ) + + input_mask = None + if self.use_input_mask: + input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) + + token_type_ids = None + if self.use_token_type_ids: + token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) + + sequence_labels = None + token_labels = None + if self.use_labels: + sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) + token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) + + config = self.get_config() + + return ( + config, + input_ids, + xpath_tags_seq, + xpath_subs_seq, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + ) + + def get_config(self): + return MarkupLMConfig( + vocab_size=self.vocab_size, + hidden_size=self.hidden_size, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + intermediate_size=self.intermediate_size, + hidden_act=self.hidden_act, + hidden_dropout_prob=self.hidden_dropout_prob, + attention_probs_dropout_prob=self.attention_probs_dropout_prob, + max_position_embeddings=self.max_position_embeddings, + type_vocab_size=self.type_vocab_size, + initializer_range=self.initializer_range, + max_xpath_tag_unit_embeddings=self.max_xpath_tag_unit_embeddings, + max_xpath_subs_unit_embeddings=self.max_xpath_subs_unit_embeddings, + tag_pad_id=self.tag_pad_id, + subs_pad_id=self.subs_pad_id, + max_depth=self.max_depth, + ) + + def create_and_check_model( + self, + config, + input_ids, + xpath_tags_seq, + xpath_subs_seq, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + ): + model = MarkupLMModel(config=config) + model.to(torch_device) + model.eval() + print("Configs:", model.config.tag_pad_id, model.config.subs_pad_id) + result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) + result = model(input_ids, token_type_ids=token_type_ids) + result = model(input_ids) + self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) + self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) + + def create_and_check_for_sequence_classification( + self, + config, + input_ids, + xpath_tags_seq, + xpath_subs_seq, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + ): + config.num_labels = self.num_labels + model = MarkupLMForSequenceClassification(config) + model.to(torch_device) + model.eval() + result = model( + input_ids, + xpath_tags_seq=xpath_tags_seq, + xpath_subs_seq=xpath_subs_seq, + attention_mask=input_mask, + token_type_ids=token_type_ids, + labels=sequence_labels, + ) + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) + + def create_and_check_for_token_classification( + self, + config, + input_ids, + xpath_tags_seq, + xpath_subs_seq, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + ): + config.num_labels = self.num_labels + model = MarkupLMForTokenClassification(config=config) + model.to(torch_device) + model.eval() + result = model( + input_ids, + xpath_tags_seq=xpath_tags_seq, + xpath_subs_seq=xpath_subs_seq, + attention_mask=input_mask, + token_type_ids=token_type_ids, + labels=token_labels, + ) + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) + + def create_and_check_for_question_answering( + self, + config, + input_ids, + xpath_tags_seq, + xpath_subs_seq, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + ): + model = MarkupLMForQuestionAnswering(config=config) + model.to(torch_device) + model.eval() + result = model( + input_ids, + xpath_tags_seq=xpath_tags_seq, + xpath_subs_seq=xpath_subs_seq, + attention_mask=input_mask, + token_type_ids=token_type_ids, + start_positions=sequence_labels, + end_positions=sequence_labels, + ) + self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) + self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + ( + config, + input_ids, + xpath_tags_seq, + xpath_subs_seq, + token_type_ids, + input_mask, + sequence_labels, + token_labels, + ) = config_and_inputs + inputs_dict = { + "input_ids": input_ids, + "xpath_tags_seq": xpath_tags_seq, + "xpath_subs_seq": xpath_subs_seq, + "token_type_ids": token_type_ids, + "attention_mask": input_mask, + } + return config, inputs_dict + + +@require_torch +class MarkupLMModelTest(ModelTesterMixin, unittest.TestCase): + all_model_classes = ( + ( + MarkupLMModel, + MarkupLMForSequenceClassification, + MarkupLMForTokenClassification, + MarkupLMForQuestionAnswering, + ) + if is_torch_available() + else None + ) + + def setUp(self): + self.model_tester = MarkupLMModelTester(self) + self.config_tester = ConfigTester(self, config_class=MarkupLMConfig, hidden_size=37) + + def test_config(self): + self.config_tester.run_common_tests() + + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + def test_for_sequence_classification(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) + + def test_for_token_classification(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_for_token_classification(*config_and_inputs) + + def test_for_question_answering(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_for_question_answering(*config_and_inputs) + + +def prepare_html_string(): + html_string = """ + + + + Page Title + + + +

This is a Heading

+

This is a paragraph.

+ + + + """ + + return html_string + + +@require_torch +class MarkupLMModelIntegrationTest(unittest.TestCase): + @cached_property + def default_processor(self): + # TODO use from_pretrained here + feature_extractor = MarkupLMFeatureExtractor() + tokenizer = MarkupLMTokenizer.from_pretrained("microsoft/markuplm-base") + + return MarkupLMProcessor(feature_extractor, tokenizer) + + @slow + def test_forward_pass_no_head(self): + model = MarkupLMModel.from_pretrained("microsoft/markuplm-base").to(torch_device) + + processor = self.default_processor + + inputs = processor(prepare_html_string(), return_tensors="pt") + inputs = inputs.to(torch_device) + + # forward pass + with torch.no_grad(): + outputs = model(**inputs) + + # verify the last hidden states + expected_shape = torch.Size([1, 14, 768]) + self.assertEqual(outputs.last_hidden_state.shape, expected_shape) + + expected_slice = torch.tensor( + [[0.0267, -0.1289, 0.4930], [-0.2376, -0.0342, 0.2381], [-0.0329, -0.3785, 0.0263]] + ).to(torch_device) + + self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)) diff --git a/tests/models/markuplm/test_processor_markuplm.py b/tests/models/markuplm/test_processor_markuplm.py new file mode 100644 index 00000000000000..6870a63336a2af --- /dev/null +++ b/tests/models/markuplm/test_processor_markuplm.py @@ -0,0 +1,451 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os +import shutil +import tempfile +import unittest +from typing import List + +from transformers import ( + MarkupLMProcessor, + MarkupLMTokenizer, + PreTrainedTokenizer, + PreTrainedTokenizerBase, + PreTrainedTokenizerFast, +) +from transformers.models.markuplm.tokenization_markuplm import VOCAB_FILES_NAMES +from transformers.testing_utils import require_bs4, require_tokenizers, require_torch, slow +from transformers.utils import FEATURE_EXTRACTOR_NAME, cached_property, is_bs4_available, is_tokenizers_available + + +if is_bs4_available(): + from transformers import MarkupLMFeatureExtractor + +if is_tokenizers_available(): + from transformers import MarkupLMTokenizerFast + + +@require_bs4 +@require_tokenizers +class MarkupLMProcessorTest(unittest.TestCase): + tokenizer_class = MarkupLMTokenizer + rust_tokenizer_class = MarkupLMTokenizerFast + + def setUp(self): + # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt + # fmt: off + vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "\u0120hello", "\u0120world", "",] # noqa + # fmt: on + self.tmpdirname = tempfile.mkdtemp() + vocab_tokens = dict(zip(vocab, range(len(vocab)))) + merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] + self.tags_dict = {"a": 0, "abbr": 1, "acronym": 2, "address": 3} + self.special_tokens_map = {"unk_token": ""} + + self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) + self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) + self.tokenizer_config_file = os.path.join(self.tmpdirname, "tokenizer_config.json") + + with open(self.vocab_file, "w", encoding="utf-8") as fp: + fp.write(json.dumps(vocab_tokens) + "\n") + with open(self.merges_file, "w", encoding="utf-8") as fp: + fp.write("\n".join(merges)) + with open(self.tokenizer_config_file, "w", encoding="utf-8") as fp: + fp.write(json.dumps({"tags_dict": self.tags_dict})) + + feature_extractor_map = {"feature_extractor_type": "MarkupLMFeatureExtractor"} + self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME) + with open(self.feature_extraction_file, "w", encoding="utf-8") as fp: + fp.write(json.dumps(feature_extractor_map) + "\n") + + def get_tokenizer(self, **kwargs) -> PreTrainedTokenizer: + return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) + + def get_rust_tokenizer(self, **kwargs) -> PreTrainedTokenizerFast: + return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) + + def get_tokenizers(self, **kwargs) -> List[PreTrainedTokenizerBase]: + return [self.get_tokenizer(**kwargs), self.get_rust_tokenizer(**kwargs)] + + def get_feature_extractor(self, **kwargs): + return MarkupLMFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs) + + def tearDown(self): + shutil.rmtree(self.tmpdirname) + + def test_save_load_pretrained_default(self): + feature_extractor = self.get_feature_extractor() + tokenizers = self.get_tokenizers() + for tokenizer in tokenizers: + processor = MarkupLMProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer) + + processor.save_pretrained(self.tmpdirname) + processor = MarkupLMProcessor.from_pretrained(self.tmpdirname) + + self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) + self.assertIsInstance(processor.tokenizer, (MarkupLMTokenizer, MarkupLMTokenizerFast)) + + self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string()) + self.assertIsInstance(processor.feature_extractor, MarkupLMFeatureExtractor) + + def test_save_load_pretrained_additional_features(self): + processor = MarkupLMProcessor(feature_extractor=self.get_feature_extractor(), tokenizer=self.get_tokenizer()) + processor.save_pretrained(self.tmpdirname) + + # slow tokenizer + tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") + feature_extractor_add_kwargs = self.get_feature_extractor(do_resize=False, size=30) + + processor = MarkupLMProcessor.from_pretrained( + self.tmpdirname, use_fast=False, bos_token="(BOS)", eos_token="(EOS)", do_resize=False, size=30 + ) + + self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) + self.assertIsInstance(processor.tokenizer, MarkupLMTokenizer) + + self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) + self.assertIsInstance(processor.feature_extractor, MarkupLMFeatureExtractor) + + # fast tokenizer + tokenizer_add_kwargs = self.get_rust_tokenizer(bos_token="(BOS)", eos_token="(EOS)") + feature_extractor_add_kwargs = self.get_feature_extractor(do_resize=False, size=30) + + processor = MarkupLMProcessor.from_pretrained( + self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_resize=False, size=30 + ) + + self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) + self.assertIsInstance(processor.tokenizer, MarkupLMTokenizerFast) + + self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) + self.assertIsInstance(processor.feature_extractor, MarkupLMFeatureExtractor) + + +# different use cases tests +@require_bs4 +@require_torch +class MarkupLMProcessorIntegrationTests(unittest.TestCase): + @cached_property + def get_html_strings(self): + html_string_1 = """ + + + + Hello world + + + +

Welcome

+

Here is my website.

+ + + """ + + html_string_2 = """ + + + + +

HTML Images

+

HTML images are defined with the img tag:

+ + W3Schools.com + + + + """ + + return [html_string_1, html_string_2] + + @cached_property + def get_tokenizers(self): + slow_tokenizer = MarkupLMTokenizer.from_pretrained("microsoft/markuplm-base") + fast_tokenizer = MarkupLMTokenizerFast.from_pretrained("microsoft/markuplm-base", from_slow=True) + return [slow_tokenizer, fast_tokenizer] + + @slow + def test_processor_case_1(self): + # case 1: web page classification (training, inference) + token classification (inference) + + feature_extractor = MarkupLMFeatureExtractor() + tokenizers = self.get_tokenizers + html_strings = self.get_html_strings + + for tokenizer in tokenizers: + processor = MarkupLMProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer) + + # not batched + inputs = processor(html_strings[0], return_tensors="pt") + + # verify keys + expected_keys = ["attention_mask", "input_ids", "token_type_ids", "xpath_subs_seq", "xpath_tags_seq"] + actual_keys = sorted(list(inputs.keys())) + self.assertListEqual(actual_keys, expected_keys) + + # verify input_ids + expected = [0, 31414, 232, 25194, 11773, 16, 127, 998, 4, 2] + self.assertSequenceEqual(inputs.input_ids.squeeze().tolist(), expected) + + # batched + inputs = processor(html_strings, padding=True, return_tensors="pt") + + # verify keys + expected_keys = ["attention_mask", "input_ids", "token_type_ids", "xpath_subs_seq", "xpath_tags_seq"] + actual_keys = sorted(list(inputs.keys())) + self.assertListEqual(actual_keys, expected_keys) + + # verify input_ids + expected = [0, 48085, 2209, 48085, 3156, 32, 6533, 19, 5, 48599, 6694, 35, 2] + self.assertSequenceEqual(inputs.input_ids[1].tolist(), expected) + + @slow + def test_processor_case_2(self): + # case 2: web page classification (training, inference) + token classification (inference), parse_html=False + + feature_extractor = MarkupLMFeatureExtractor() + tokenizers = self.get_tokenizers + + for tokenizer in tokenizers: + processor = MarkupLMProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer) + processor.parse_html = False + + # not batched + nodes = ["hello", "world", "how", "are"] + xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span", "html/body", "html/body/div"] + inputs = processor(nodes=nodes, xpaths=xpaths, return_tensors="pt") + + # verify keys + expected_keys = ["attention_mask", "input_ids", "token_type_ids", "xpath_subs_seq", "xpath_tags_seq"] + actual_keys = list(inputs.keys()) + for key in expected_keys: + self.assertIn(key, actual_keys) + + # verify input_ids + expected_decoding = "helloworldhoware" + decoding = processor.decode(inputs.input_ids.squeeze().tolist()) + self.assertSequenceEqual(decoding, expected_decoding) + + # batched + nodes = [["hello", "world"], ["my", "name", "is"]] + xpaths = [ + ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span"], + ["html/body", "html/body/div", "html/body"], + ] + inputs = processor(nodes=nodes, xpaths=xpaths, padding=True, return_tensors="pt") + + # verify keys + expected_keys = ["attention_mask", "input_ids", "token_type_ids", "xpath_subs_seq", "xpath_tags_seq"] + actual_keys = sorted(list(inputs.keys())) + self.assertListEqual(actual_keys, expected_keys) + + # verify input_ids + expected_decoding = "helloworld" + decoding = processor.decode(inputs.input_ids[0].tolist()) + self.assertSequenceEqual(decoding, expected_decoding) + + @slow + def test_processor_case_3(self): + # case 3: token classification (training), parse_html=False + + feature_extractor = MarkupLMFeatureExtractor() + tokenizers = self.get_tokenizers + + for tokenizer in tokenizers: + processor = MarkupLMProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer) + processor.parse_html = False + + # not batched + nodes = ["hello", "world", "how", "are"] + xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span", "html/body", "html/body/div"] + node_labels = [1, 2, 2, 1] + inputs = processor(nodes=nodes, xpaths=xpaths, node_labels=node_labels, return_tensors="pt") + + # verify keys + expected_keys = [ + "attention_mask", + "input_ids", + "labels", + "token_type_ids", + "xpath_subs_seq", + "xpath_tags_seq", + ] + actual_keys = sorted(list(inputs.keys())) + self.assertListEqual(actual_keys, expected_keys) + + # verify input_ids + expected_ids = [0, 42891, 8331, 9178, 1322, 2] + self.assertSequenceEqual(inputs.input_ids[0].tolist(), expected_ids) + + # verify labels + expected_labels = [-100, 1, 2, 2, 1, -100] + self.assertListEqual(inputs.labels.squeeze().tolist(), expected_labels) + + # batched + nodes = [["hello", "world"], ["my", "name", "is"]] + xpaths = [ + ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span"], + ["html/body", "html/body/div", "html/body"], + ] + node_labels = [[1, 2], [6, 3, 10]] + inputs = processor( + nodes=nodes, + xpaths=xpaths, + node_labels=node_labels, + padding="max_length", + max_length=20, + truncation=True, + return_tensors="pt", + ) + + # verify keys + expected_keys = [ + "attention_mask", + "input_ids", + "labels", + "token_type_ids", + "xpath_subs_seq", + "xpath_tags_seq", + ] + actual_keys = sorted(list(inputs.keys())) + self.assertListEqual(actual_keys, expected_keys) + + # verify input_ids + expected_ids = [0, 4783, 13650, 354, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] + self.assertSequenceEqual(inputs.input_ids[1].tolist(), expected_ids) + + # verify xpath_tags_seq + # fmt: off + expected_xpaths_tags_seq = [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]] # noqa: + # fmt: on + self.assertSequenceEqual(inputs.xpath_tags_seq[1].tolist(), expected_xpaths_tags_seq) + + # verify labels + # fmt: off + expected_labels = [-100, 6, 3, 10, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100] + # fmt: on + self.assertListEqual(inputs.labels[1].tolist(), expected_labels) + + @slow + def test_processor_case_4(self): + # case 4: question answering (inference), parse_html=True + + feature_extractor = MarkupLMFeatureExtractor() + tokenizers = self.get_tokenizers + html_strings = self.get_html_strings + + for tokenizer in tokenizers: + processor = MarkupLMProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer) + + # not batched + question = "What's his name?" + inputs = processor(html_strings[0], questions=question, return_tensors="pt") + + # verify keys + expected_keys = ["attention_mask", "input_ids", "token_type_ids", "xpath_subs_seq", "xpath_tags_seq"] + actual_keys = sorted(list(inputs.keys())) + self.assertListEqual(actual_keys, expected_keys) + + # verify input_ids + # fmt: off + expected_decoding = "What's his name?Hello worldWelcomeHere is my website." # noqa: E231 + # fmt: on + decoding = processor.decode(inputs.input_ids.squeeze().tolist()) + self.assertSequenceEqual(decoding, expected_decoding) + + # batched + questions = ["How old is he?", "what's the time"] + inputs = processor( + html_strings, + questions=questions, + padding="max_length", + max_length=20, + truncation=True, + return_tensors="pt", + ) + + # verify keys + expected_keys = ["attention_mask", "input_ids", "token_type_ids", "xpath_subs_seq", "xpath_tags_seq"] + actual_keys = sorted(list(inputs.keys())) + self.assertListEqual(actual_keys, expected_keys) + + # verify input_ids + expected_decoding = ( + "what's the timeHTML ImagesHTML images are defined with the img tag:" + ) + decoding = processor.decode(inputs.input_ids[1].tolist()) + self.assertSequenceEqual(decoding, expected_decoding) + + # verify xpath_subs_seq + # fmt: off + expected_xpath_subs_seq = [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 99, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 99, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 148, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 148, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 148, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 148, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 148, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 148, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 148, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 148, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 148, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]] # noqa: E231 + # fmt: on + self.assertListEqual(inputs.xpath_subs_seq[1].tolist(), expected_xpath_subs_seq) + + @slow + def test_processor_case_5(self): + # case 5: question answering (inference), parse_html=False + + feature_extractor = MarkupLMFeatureExtractor(parse_html=False) + tokenizers = self.get_tokenizers + + for tokenizer in tokenizers: + processor = MarkupLMProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer) + processor.parse_html = False + + # not batched + question = "What's his name?" + nodes = ["hello", "world", "how", "are"] + xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span", "html/body", "html/body/div"] + inputs = processor(nodes=nodes, xpaths=xpaths, questions=question, return_tensors="pt") + + # verify keys + expected_keys = ["attention_mask", "input_ids", "token_type_ids", "xpath_subs_seq", "xpath_tags_seq"] + actual_keys = sorted(list(inputs.keys())) + self.assertListEqual(actual_keys, expected_keys) + + # verify input_ids + expected_decoding = "What's his name?helloworldhoware" + decoding = processor.decode(inputs.input_ids.squeeze().tolist()) + self.assertSequenceEqual(decoding, expected_decoding) + + # batched + questions = ["How old is he?", "what's the time"] + nodes = [["hello", "world"], ["my", "name", "is"]] + xpaths = [ + ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span"], + ["html/body", "html/body/div", "html/body"], + ] + inputs = processor(nodes=nodes, xpaths=xpaths, questions=questions, padding=True, return_tensors="pt") + + # verify keys + expected_keys = ["attention_mask", "input_ids", "token_type_ids", "xpath_subs_seq", "xpath_tags_seq"] + actual_keys = sorted(list(inputs.keys())) + self.assertListEqual(actual_keys, expected_keys) + + # verify input_ids + expected_decoding = "How old is he?helloworld" + decoding = processor.decode(inputs.input_ids[0].tolist()) + self.assertSequenceEqual(decoding, expected_decoding) + + expected_decoding = "what's the timemynameis" + decoding = processor.decode(inputs.input_ids[1].tolist()) + self.assertSequenceEqual(decoding, expected_decoding) + + # verify xpath_subs_seq + # fmt: off + expected_xpath_subs_seq = [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]] # noqa: E231 + # fmt: on + self.assertListEqual(inputs.xpath_subs_seq[1].tolist()[-5:], expected_xpath_subs_seq) diff --git a/tests/models/markuplm/test_tokenization_markuplm.py b/tests/models/markuplm/test_tokenization_markuplm.py new file mode 100644 index 00000000000000..e59934e4d086b4 --- /dev/null +++ b/tests/models/markuplm/test_tokenization_markuplm.py @@ -0,0 +1,2306 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import json +import os +import re +import shutil +import tempfile +import unittest +from typing import List + +from transformers import ( + AddedToken, + MarkupLMTokenizerFast, + SpecialTokensMixin, + is_tf_available, + is_torch_available, + logging, +) +from transformers.models.markuplm.tokenization_markuplm import VOCAB_FILES_NAMES, MarkupLMTokenizer +from transformers.testing_utils import is_pt_tf_cross_test, require_tokenizers, require_torch, slow + +from ...test_tokenization_common import SMALL_TRAINING_CORPUS, TokenizerTesterMixin, merge_model_tokenizer_mappings + + +logger = logging.get_logger(__name__) + + +@require_tokenizers +class MarkupLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase): + tokenizer_class = MarkupLMTokenizer + rust_tokenizer_class = MarkupLMTokenizerFast + test_rust_tokenizer = True + from_pretrained_kwargs = {"cls_token": ""} + test_seq2seq = False + + def setUp(self): + super().setUp() + + # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt + # fmt: off + vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "\u0120hello", "\u0120world", "",] # noqa + # fmt: on + vocab_tokens = dict(zip(vocab, range(len(vocab)))) + merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] + self.tags_dict = {"a": 0, "abbr": 1, "acronym": 2, "address": 3} + self.special_tokens_map = {"unk_token": ""} + + self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) + self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) + self.tokenizer_config_file = os.path.join(self.tmpdirname, "tokenizer_config.json") + + with open(self.vocab_file, "w", encoding="utf-8") as fp: + fp.write(json.dumps(vocab_tokens) + "\n") + with open(self.merges_file, "w", encoding="utf-8") as fp: + fp.write("\n".join(merges)) + with open(self.tokenizer_config_file, "w", encoding="utf-8") as fp: + fp.write(json.dumps({"tags_dict": self.tags_dict})) + + def get_nodes_and_xpaths(self): + nodes = ["hello", "world"] + xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span"] + + return nodes, xpaths + + def get_nodes_and_xpaths_batch(self): + nodes = [["hello world", "running"], ["hello my name is bob"]] + xpaths = [ + ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span"], + ["/html/body/div/li[2]/div/span"], + ] + + return nodes, xpaths + + def get_question_nodes_and_xpaths(self): + question = "what's his name?" + nodes = ["hello world"] + xpaths = ["/html/body/div/li[1]/div/span"] # , "/html/body/div/li[1]/div/span"] + + return question, nodes, xpaths + + def get_question_nodes_and_xpaths_batch(self): + questions = ["what's his name?", "how is he called?"] + nodes = [["hello world", "running"], ["hello my name is bob"]] + xpaths = [ + ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span"], + ["/html/body/div/li[2]/div/span"], + ] + + return questions, nodes, xpaths + + def get_input_output_texts(self, tokenizer): + input_text = "UNwant\u00E9d,running" + output_text = "unwanted, running" + return input_text, output_text + + def test_add_special_tokens(self): + tokenizers: List[MarkupLMTokenizer] = self.get_tokenizers(do_lower_case=False) + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + special_token = "[SPECIAL_TOKEN]" + special_token_xpath = "/html/body/div/li[1]/div/span" + + tokenizer.add_special_tokens({"cls_token": special_token}) + encoded_special_token = tokenizer.encode( + [special_token], xpaths=[special_token_xpath], add_special_tokens=False + ) + self.assertEqual(len(encoded_special_token), 1) + + decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True) + self.assertTrue(special_token not in decoded) + + def test_add_tokens_tokenizer(self): + tokenizers: List[MarkupLMTokenizer] = self.get_tokenizers(do_lower_case=False) + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + vocab_size = tokenizer.vocab_size + all_size = len(tokenizer) + + self.assertNotEqual(vocab_size, 0) + + # We usually have added tokens from the start in tests because our vocab fixtures are + # smaller than the original vocabs - let's not assert this + # self.assertEqual(vocab_size, all_size) + + new_toks = ["aaaaa", "bbbbbb", "cccccccccdddddddd"] + added_toks = tokenizer.add_tokens(new_toks) + vocab_size_2 = tokenizer.vocab_size + all_size_2 = len(tokenizer) + + self.assertNotEqual(vocab_size_2, 0) + self.assertEqual(vocab_size, vocab_size_2) + self.assertEqual(added_toks, len(new_toks)) + self.assertEqual(all_size_2, all_size + len(new_toks)) + + nodes = "aaaaa bbbbbb low cccccccccdddddddd l".split() + xpaths = ["/html/body/div/li[1]/div/span" for _ in range(len(nodes))] + + tokens = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False) + + self.assertGreaterEqual(len(tokens), 4) + self.assertGreater(tokens[0], tokenizer.vocab_size - 1) + self.assertGreater(tokens[-2], tokenizer.vocab_size - 1) + + new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} + added_toks_2 = tokenizer.add_special_tokens(new_toks_2) + vocab_size_3 = tokenizer.vocab_size + all_size_3 = len(tokenizer) + + self.assertNotEqual(vocab_size_3, 0) + self.assertEqual(vocab_size, vocab_size_3) + self.assertEqual(added_toks_2, len(new_toks_2)) + self.assertEqual(all_size_3, all_size_2 + len(new_toks_2)) + + nodes = ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l".split() + xpaths = ["/html/body/div/li[1]/div/span" for _ in range(len(nodes))] + + tokens = tokenizer.encode( + nodes, + xpaths=xpaths, + add_special_tokens=False, + ) + + self.assertGreaterEqual(len(tokens), 6) + self.assertGreater(tokens[0], tokenizer.vocab_size - 1) + self.assertGreater(tokens[0], tokens[1]) + self.assertGreater(tokens[-2], tokenizer.vocab_size - 1) + self.assertGreater(tokens[-2], tokens[-3]) + self.assertEqual(tokens[0], tokenizer.eos_token_id) + self.assertEqual(tokens[-2], tokenizer.pad_token_id) + + @require_tokenizers + def test_encode_decode_with_spaces(self): + tokenizers = self.get_tokenizers(do_lower_case=False) + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + nodes, xpaths = self.get_nodes_and_xpaths() + + new_toks = [AddedToken("[ABC]", normalized=False), AddedToken("[DEF]", normalized=False)] + tokenizer.add_tokens(new_toks) + input = "[ABC][DEF][ABC][DEF]" + if self.space_between_special_tokens: + output = "[ABC] [DEF] [ABC] [DEF]" + else: + output = input + encoded = tokenizer.encode(input.split(), xpaths=xpaths, add_special_tokens=False) + decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens) + self.assertIn(decoded, [output, output.lower()]) + + @unittest.skip("Not implemented") + def test_right_and_left_truncation(self): + pass + + def test_encode_plus_with_padding(self): + tokenizers = self.get_tokenizers(do_lower_case=False) + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + nodes, xpaths = self.get_nodes_and_xpaths() + + # check correct behaviour if no pad_token_id exists and add it eventually + self._check_no_pad_token_padding(tokenizer, nodes) + + padding_size = 10 + padding_idx = tokenizer.pad_token_id + + encoded_sequence = tokenizer.encode_plus(nodes, xpaths=xpaths, return_special_tokens_mask=True) + input_ids = encoded_sequence["input_ids"] + special_tokens_mask = encoded_sequence["special_tokens_mask"] + sequence_length = len(input_ids) + + # Test 'longest' and 'no_padding' don't do anything + tokenizer.padding_side = "right" + + not_padded_sequence = tokenizer.encode_plus( + nodes, + xpaths=xpaths, + padding=False, + return_special_tokens_mask=True, + ) + not_padded_input_ids = not_padded_sequence["input_ids"] + + not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"] + not_padded_sequence_length = len(not_padded_input_ids) + + self.assertTrue(sequence_length == not_padded_sequence_length) + self.assertTrue(input_ids == not_padded_input_ids) + self.assertTrue(special_tokens_mask == not_padded_special_tokens_mask) + + not_padded_sequence = tokenizer.encode_plus( + nodes, + xpaths=xpaths, + padding=False, + return_special_tokens_mask=True, + ) + not_padded_input_ids = not_padded_sequence["input_ids"] + + not_padded_special_tokens_mask = not_padded_sequence["special_tokens_mask"] + not_padded_sequence_length = len(not_padded_input_ids) + + self.assertTrue(sequence_length == not_padded_sequence_length) + self.assertTrue(input_ids == not_padded_input_ids) + self.assertTrue(special_tokens_mask == not_padded_special_tokens_mask) + + # Test right padding + tokenizer.padding_side = "right" + + right_padded_sequence = tokenizer.encode_plus( + nodes, + xpaths=xpaths, + max_length=sequence_length + padding_size, + padding="max_length", + return_special_tokens_mask=True, + ) + right_padded_input_ids = right_padded_sequence["input_ids"] + + right_padded_special_tokens_mask = right_padded_sequence["special_tokens_mask"] + right_padded_sequence_length = len(right_padded_input_ids) + + self.assertTrue(sequence_length + padding_size == right_padded_sequence_length) + self.assertTrue(input_ids + [padding_idx] * padding_size == right_padded_input_ids) + self.assertTrue(special_tokens_mask + [1] * padding_size == right_padded_special_tokens_mask) + + # Test left padding + tokenizer.padding_side = "left" + left_padded_sequence = tokenizer.encode_plus( + nodes, + xpaths=xpaths, + max_length=sequence_length + padding_size, + padding="max_length", + return_special_tokens_mask=True, + ) + left_padded_input_ids = left_padded_sequence["input_ids"] + left_padded_special_tokens_mask = left_padded_sequence["special_tokens_mask"] + left_padded_sequence_length = len(left_padded_input_ids) + + self.assertTrue(sequence_length + padding_size == left_padded_sequence_length) + self.assertTrue([padding_idx] * padding_size + input_ids == left_padded_input_ids) + self.assertTrue([1] * padding_size + special_tokens_mask == left_padded_special_tokens_mask) + + if "token_type_ids" in tokenizer.model_input_names: + token_type_ids = encoded_sequence["token_type_ids"] + left_padded_token_type_ids = left_padded_sequence["token_type_ids"] + right_padded_token_type_ids = right_padded_sequence["token_type_ids"] + + assert token_type_ids + [0] * padding_size == right_padded_token_type_ids + assert [0] * padding_size + token_type_ids == left_padded_token_type_ids + + if "attention_mask" in tokenizer.model_input_names: + attention_mask = encoded_sequence["attention_mask"] + right_padded_attention_mask = right_padded_sequence["attention_mask"] + left_padded_attention_mask = left_padded_sequence["attention_mask"] + + self.assertTrue(attention_mask + [0] * padding_size == right_padded_attention_mask) + self.assertTrue([0] * padding_size + attention_mask == left_padded_attention_mask) + + def test_internal_consistency(self): + tokenizers = self.get_tokenizers() + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + nodes, xpaths = self.get_nodes_and_xpaths() + + tokens = [] + for word in nodes: + tokens.extend(tokenizer.tokenize(word)) + ids = tokenizer.convert_tokens_to_ids(tokens) + ids_2 = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False) + self.assertListEqual(ids, ids_2) + + tokens_2 = tokenizer.convert_ids_to_tokens(ids) + self.assertNotEqual(len(tokens_2), 0) + text_2 = tokenizer.decode(ids) + self.assertIsInstance(text_2, str) + + def test_mask_output(self): + tokenizers = self.get_tokenizers(fast=False, do_lower_case=False) + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + nodes, xpaths = self.get_nodes_and_xpaths() + + if ( + tokenizer.build_inputs_with_special_tokens.__qualname__.split(".")[0] != "PreTrainedTokenizer" + and "token_type_ids" in tokenizer.model_input_names + ): + information = tokenizer.encode_plus(nodes, xpaths=xpaths, add_special_tokens=True) + sequences, mask = information["input_ids"], information["token_type_ids"] + self.assertEqual(len(sequences), len(mask)) + + def test_number_of_added_tokens(self): + tokenizers = self.get_tokenizers(do_lower_case=False) + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + # test 1: single sequence + nodes, xpaths = self.get_nodes_and_xpaths() + + sequences = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False) + attached_sequences = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=True) + + # Method is implemented (e.g. not GPT-2) + if len(attached_sequences) != 2: + self.assertEqual( + tokenizer.num_special_tokens_to_add(pair=False), len(attached_sequences) - len(sequences) + ) + + # test 2: two sequences + question, nodes, xpaths = self.get_question_nodes_and_xpaths() + + sequences = tokenizer.encode(question, nodes, xpaths=xpaths, add_special_tokens=False) + attached_sequences = tokenizer.encode(question, nodes, xpaths=xpaths, add_special_tokens=True) + + # Method is implemented (e.g. not GPT-2) + if len(attached_sequences) != 2: + self.assertEqual( + tokenizer.num_special_tokens_to_add(pair=True), len(attached_sequences) - len(sequences) + ) + + def test_padding_to_max_length(self): + """We keep this test for backward compatibility but it should be removed when `pad_to_max_length` will be deprecated""" + tokenizers = self.get_tokenizers(do_lower_case=False) + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + nodes, xpaths = self.get_nodes_and_xpaths() + padding_size = 10 + + # check correct behaviour if no pad_token_id exists and add it eventually + self._check_no_pad_token_padding(tokenizer, nodes) + + padding_idx = tokenizer.pad_token_id + + # Check that it correctly pads when a maximum length is specified along with the padding flag set to True + tokenizer.padding_side = "right" + encoded_sequence = tokenizer.encode(nodes, xpaths=xpaths) + sequence_length = len(encoded_sequence) + # FIXME: the next line should be padding(max_length) to avoid warning + padded_sequence = tokenizer.encode( + nodes, xpaths=xpaths, max_length=sequence_length + padding_size, pad_to_max_length=True + ) + padded_sequence_length = len(padded_sequence) + assert sequence_length + padding_size == padded_sequence_length + assert encoded_sequence + [padding_idx] * padding_size == padded_sequence + + # Check that nothing is done when a maximum length is not specified + encoded_sequence = tokenizer.encode(nodes, xpaths=xpaths) + sequence_length = len(encoded_sequence) + + tokenizer.padding_side = "right" + padded_sequence_right = tokenizer.encode(nodes, xpaths=xpaths, pad_to_max_length=True) + padded_sequence_right_length = len(padded_sequence_right) + assert sequence_length == padded_sequence_right_length + assert encoded_sequence == padded_sequence_right + + def test_padding(self, max_length=50): + for tokenizer, pretrained_name, kwargs in self.tokenizers_list: + with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): + tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) + tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) + + self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id) + pad_token_id = tokenizer_p.pad_token_id + + # Encode - Simple input + nodes, xpaths = self.get_nodes_and_xpaths() + input_r = tokenizer_r.encode(nodes, xpaths=xpaths, max_length=max_length, pad_to_max_length=True) + input_p = tokenizer_p.encode(nodes, xpaths=xpaths, max_length=max_length, pad_to_max_length=True) + self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id) + input_r = tokenizer_r.encode(nodes, xpaths=xpaths, max_length=max_length, padding="max_length") + input_p = tokenizer_p.encode(nodes, xpaths=xpaths, max_length=max_length, padding="max_length") + self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id) + + input_r = tokenizer_r.encode(nodes, xpaths=xpaths, padding="longest") + input_p = tokenizer_p.encode(nodes, xpaths=xpaths, padding=True) + self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id) + + # Encode - Pair input + question, nodes, xpaths = self.get_question_nodes_and_xpaths() + input_r = tokenizer_r.encode( + question, nodes, xpaths=xpaths, max_length=max_length, pad_to_max_length=True + ) + input_p = tokenizer_p.encode( + question, nodes, xpaths=xpaths, max_length=max_length, pad_to_max_length=True + ) + self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id) + input_r = tokenizer_r.encode( + question, nodes, xpaths=xpaths, max_length=max_length, padding="max_length" + ) + input_p = tokenizer_p.encode( + question, nodes, xpaths=xpaths, max_length=max_length, padding="max_length" + ) + self.assert_padded_input_match(input_r, input_p, max_length, pad_token_id) + input_r = tokenizer_r.encode(question, nodes, xpaths=xpaths, padding=True) + input_p = tokenizer_p.encode(question, nodes, xpaths=xpaths, padding="longest") + self.assert_padded_input_match(input_r, input_p, len(input_r), pad_token_id) + + # Encode_plus - Simple input + nodes, xpaths = self.get_nodes_and_xpaths() + input_r = tokenizer_r.encode_plus(nodes, xpaths=xpaths, max_length=max_length, pad_to_max_length=True) + input_p = tokenizer_p.encode_plus(nodes, xpaths=xpaths, max_length=max_length, pad_to_max_length=True) + self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) + self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) + input_r = tokenizer_r.encode_plus(nodes, xpaths=xpaths, max_length=max_length, padding="max_length") + input_p = tokenizer_p.encode_plus(nodes, xpaths=xpaths, max_length=max_length, padding="max_length") + self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) + self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) + + input_r = tokenizer_r.encode_plus(nodes, xpaths=xpaths, padding="longest") + input_p = tokenizer_p.encode_plus(nodes, xpaths=xpaths, padding=True) + self.assert_padded_input_match( + input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id + ) + + self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) + + # Encode_plus - Pair input + question, nodes, xpaths = self.get_question_nodes_and_xpaths() + input_r = tokenizer_r.encode_plus( + question, nodes, xpaths=xpaths, max_length=max_length, pad_to_max_length=True + ) + input_p = tokenizer_p.encode_plus( + question, nodes, xpaths=xpaths, max_length=max_length, pad_to_max_length=True + ) + self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) + self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) + input_r = tokenizer_r.encode_plus( + question, nodes, xpaths=xpaths, max_length=max_length, padding="max_length" + ) + input_p = tokenizer_p.encode_plus( + question, nodes, xpaths=xpaths, max_length=max_length, padding="max_length" + ) + self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) + self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) + input_r = tokenizer_r.encode_plus(question, nodes, xpaths=xpaths, padding="longest") + input_p = tokenizer_p.encode_plus(question, nodes, xpaths=xpaths, padding=True) + self.assert_padded_input_match( + input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id + ) + self.assertSequenceEqual(input_r["attention_mask"], input_p["attention_mask"]) + + # Batch_encode_plus - Simple input + nodes, xpaths = self.get_nodes_and_xpaths_batch() + + input_r = tokenizer_r.batch_encode_plus( + nodes, + xpaths=xpaths, + max_length=max_length, + pad_to_max_length=True, + ) + input_p = tokenizer_p.batch_encode_plus( + nodes, + xpaths=xpaths, + max_length=max_length, + pad_to_max_length=True, + ) + self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) + + input_r = tokenizer_r.batch_encode_plus( + nodes, + xpaths=xpaths, + max_length=max_length, + padding="max_length", + ) + input_p = tokenizer_p.batch_encode_plus( + nodes, + xpaths=xpaths, + max_length=max_length, + padding="max_length", + ) + self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) + + input_r = tokenizer_r.batch_encode_plus( + nodes, + xpaths=xpaths, + max_length=max_length, + padding="longest", + ) + input_p = tokenizer_p.batch_encode_plus( + nodes, + xpaths=xpaths, + max_length=max_length, + padding=True, + ) + self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id) + + input_r = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths, padding="longest") + input_p = tokenizer_p.batch_encode_plus(nodes, xpaths=xpaths, padding=True) + self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id) + + # Batch_encode_plus - Pair input + questions, nodes, xpaths = self.get_question_nodes_and_xpaths_batch() + + input_r = tokenizer_r.batch_encode_plus( + list(zip(questions, nodes)), + is_pair=True, + xpaths=xpaths, + max_length=max_length, + truncation=True, + padding="max_length", + ) + input_p = tokenizer_p.batch_encode_plus( + list(zip(questions, nodes)), + is_pair=True, + xpaths=xpaths, + max_length=max_length, + truncation=True, + padding="max_length", + ) + self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) + + input_r = tokenizer_r.batch_encode_plus( + list(zip(questions, nodes)), + is_pair=True, + xpaths=xpaths, + padding=True, + ) + input_p = tokenizer_p.batch_encode_plus( + list(zip(questions, nodes)), + is_pair=True, + xpaths=xpaths, + padding="longest", + ) + self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id) + + # Using pad on single examples after tokenization + nodes, xpaths = self.get_nodes_and_xpaths() + input_r = tokenizer_r.encode_plus(nodes, xpaths=xpaths) + input_r = tokenizer_r.pad(input_r) + + input_p = tokenizer_r.encode_plus(nodes, xpaths=xpaths) + input_p = tokenizer_r.pad(input_p) + + self.assert_padded_input_match( + input_r["input_ids"], input_p["input_ids"], len(input_r["input_ids"]), pad_token_id + ) + + # Using pad on single examples after tokenization + input_r = tokenizer_r.encode_plus(nodes, xpaths=xpaths) + input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length") + + input_p = tokenizer_r.encode_plus(nodes, xpaths=xpaths) + input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length") + + self.assert_padded_input_match(input_r["input_ids"], input_p["input_ids"], max_length, pad_token_id) + + # Using pad after tokenization + nodes, xpaths = self.get_nodes_and_xpaths_batch() + input_r = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths) + input_r = tokenizer_r.pad(input_r) + + input_p = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths) + input_p = tokenizer_r.pad(input_p) + + self.assert_batch_padded_input_match(input_r, input_p, len(input_r["input_ids"][0]), pad_token_id) + + # Using pad after tokenization + nodes, xpaths = self.get_nodes_and_xpaths_batch() + input_r = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths) + input_r = tokenizer_r.pad(input_r, max_length=max_length, padding="max_length") + + input_p = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths) + input_p = tokenizer_r.pad(input_p, max_length=max_length, padding="max_length") + + self.assert_batch_padded_input_match(input_r, input_p, max_length, pad_token_id) + + def test_call(self): + # Tests that all call wrap to encode_plus and batch_encode_plus + tokenizers = self.get_tokenizers(do_lower_case=False) + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + # Test not batched + nodes, xpaths = self.get_nodes_and_xpaths() + encoded_sequences_1 = tokenizer.encode_plus(nodes, xpaths=xpaths) + encoded_sequences_2 = tokenizer(nodes, xpaths=xpaths) + self.assertEqual(encoded_sequences_1, encoded_sequences_2) + + # Test not batched pairs + question, nodes, xpaths = self.get_question_nodes_and_xpaths() + encoded_sequences_1 = tokenizer.encode_plus(nodes, xpaths=xpaths) + encoded_sequences_2 = tokenizer(nodes, xpaths=xpaths) + self.assertEqual(encoded_sequences_1, encoded_sequences_2) + + # Test batched + nodes, xpaths = self.get_nodes_and_xpaths_batch() + encoded_sequences_1 = tokenizer.batch_encode_plus(nodes, is_pair=False, xpaths=xpaths) + encoded_sequences_2 = tokenizer(nodes, xpaths=xpaths) + self.assertEqual(encoded_sequences_1, encoded_sequences_2) + + def test_batch_encode_plus_batch_sequence_length(self): + # Tests that all encoded values have the correct size + tokenizers = self.get_tokenizers(do_lower_case=False) + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + nodes, xpaths = self.get_nodes_and_xpaths_batch() + + encoded_sequences = [ + tokenizer.encode_plus(nodes_example, xpaths=xpaths_example) + for nodes_example, xpaths_example in zip(nodes, xpaths) + ] + encoded_sequences_batch = tokenizer.batch_encode_plus( + nodes, is_pair=False, xpaths=xpaths, padding=False + ) + self.assertListEqual( + encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch) + ) + + maximum_length = len( + max([encoded_sequence["input_ids"] for encoded_sequence in encoded_sequences], key=len) + ) + + # check correct behaviour if no pad_token_id exists and add it eventually + self._check_no_pad_token_padding(tokenizer, nodes) + + encoded_sequences_padded = [ + tokenizer.encode_plus( + nodes_example, xpaths=xpaths_example, max_length=maximum_length, padding="max_length" + ) + for nodes_example, xpaths_example in zip(nodes, xpaths) + ] + + encoded_sequences_batch_padded = tokenizer.batch_encode_plus( + nodes, is_pair=False, xpaths=xpaths, padding=True + ) + self.assertListEqual( + encoded_sequences_padded, + self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch_padded), + ) + + # check 'longest' is unsensitive to a max length + encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus( + nodes, is_pair=False, xpaths=xpaths, padding=True + ) + encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus( + nodes, is_pair=False, xpaths=xpaths, max_length=maximum_length + 10, padding="longest" + ) + for key in encoded_sequences_batch_padded_1.keys(): + self.assertListEqual( + encoded_sequences_batch_padded_1[key], + encoded_sequences_batch_padded_2[key], + ) + + # check 'no_padding' is unsensitive to a max length + encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus( + nodes, is_pair=False, xpaths=xpaths, padding=False + ) + encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus( + nodes, is_pair=False, xpaths=xpaths, max_length=maximum_length + 10, padding=False + ) + for key in encoded_sequences_batch_padded_1.keys(): + self.assertListEqual( + encoded_sequences_batch_padded_1[key], + encoded_sequences_batch_padded_2[key], + ) + + @unittest.skip("batch_encode_plus does not handle overflowing tokens.") + def test_batch_encode_plus_overflowing_tokens(self): + pass + + def test_batch_encode_plus_padding(self): + # Test that padded sequences are equivalent between batch_encode_plus and encode_plus + + # Right padding tests + tokenizers = self.get_tokenizers(do_lower_case=False) + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + nodes, xpaths = self.get_nodes_and_xpaths_batch() + + max_length = 100 + + # check correct behaviour if no pad_token_id exists and add it eventually + self._check_no_pad_token_padding(tokenizer, nodes) + + encoded_sequences = [ + tokenizer.encode_plus( + nodes_example, xpaths=xpaths_example, max_length=max_length, padding="max_length" + ) + for nodes_example, xpaths_example in zip(nodes, xpaths) + ] + encoded_sequences_batch = tokenizer.batch_encode_plus( + nodes, is_pair=False, xpaths=xpaths, max_length=max_length, padding="max_length" + ) + self.assertListEqual( + encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch) + ) + + # Left padding tests + tokenizers = self.get_tokenizers(do_lower_case=False) + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + tokenizer.padding_side = "left" + nodes, xpaths = self.get_nodes_and_xpaths_batch() + + max_length = 100 + + # check correct behaviour if no pad_token_id exists and add it eventually + self._check_no_pad_token_padding(tokenizer, nodes) + + encoded_sequences = [ + tokenizer.encode_plus( + nodes_example, xpaths=xpaths_example, max_length=max_length, padding="max_length" + ) + for nodes_example, xpaths_example in zip(nodes, xpaths) + ] + encoded_sequences_batch = tokenizer.batch_encode_plus( + nodes, is_pair=False, xpaths=xpaths, max_length=max_length, padding="max_length" + ) + self.assertListEqual( + encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch) + ) + + def test_padding_to_multiple_of(self): + tokenizers = self.get_tokenizers() + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + if tokenizer.pad_token is None: + self.skipTest("No padding token.") + else: + nodes, xpaths = self.get_nodes_and_xpaths() + + # empty_tokens = tokenizer([""], [[]], padding=True, pad_to_multiple_of=8) + normal_tokens = tokenizer(nodes, xpaths=xpaths, padding=True, pad_to_multiple_of=8) + # for key, value in empty_tokens.items(): + # self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") + for key, value in normal_tokens.items(): + self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") + + normal_tokens = tokenizer(nodes, xpaths=xpaths, pad_to_multiple_of=8) + for key, value in normal_tokens.items(): + self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") + + # Should also work with truncation + normal_tokens = tokenizer( + nodes, xpaths=xpaths, padding=True, truncation=True, pad_to_multiple_of=8 + ) + for key, value in normal_tokens.items(): + self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") + + # truncation to something which is not a multiple of pad_to_multiple_of raises an error + self.assertRaises( + ValueError, + tokenizer.__call__, + nodes, + xpaths=xpaths, + padding=True, + truncation=True, + max_length=12, + pad_to_multiple_of=8, + ) + + def test_tokenizer_slow_store_full_signature(self): + signature = inspect.signature(self.tokenizer_class.__init__) + tokenizer = self.get_tokenizer() + + for parameter_name, parameter in signature.parameters.items(): + if parameter.default != inspect.Parameter.empty: + self.assertIn(parameter_name, tokenizer.init_kwargs) + + def test_build_inputs_with_special_tokens(self): + if not self.test_slow_tokenizer: + # as we don't have a slow version, we can't compare the outputs between slow and fast versions + return + + for tokenizer, pretrained_name, kwargs in self.tokenizers_list: + with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): + tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) + tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) + + # Input tokens id + nodes, xpaths = self.get_nodes_and_xpaths() + input_simple = tokenizer_p.encode(nodes, xpaths=xpaths, add_special_tokens=False) + input_pair = tokenizer_p.encode(nodes, xpaths=xpaths, add_special_tokens=False) + + # Generate output + output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple) + output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple) + self.assertEqual(output_p, output_r) + + # Generate pair output + output_r = tokenizer_r.build_inputs_with_special_tokens(input_simple, input_pair) + output_p = tokenizer_p.build_inputs_with_special_tokens(input_simple, input_pair) + self.assertEqual(output_p, output_r) + + def test_special_tokens_mask_input_pairs(self): + tokenizers = self.get_tokenizers(do_lower_case=False) + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + nodes, xpaths = self.get_nodes_and_xpaths() + encoded_sequence = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False) + encoded_sequence_dict = tokenizer.encode_plus( + nodes, + xpaths=xpaths, + add_special_tokens=True, + return_special_tokens_mask=True, + # add_prefix_space=False, + ) + encoded_sequence_w_special = encoded_sequence_dict["input_ids"] + special_tokens_mask = encoded_sequence_dict["special_tokens_mask"] + self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special)) + + filtered_sequence = [ + (x if not special_tokens_mask[i] else None) for i, x in enumerate(encoded_sequence_w_special) + ] + filtered_sequence = [x for x in filtered_sequence if x is not None] + self.assertEqual(encoded_sequence, filtered_sequence) + + def test_special_tokens_mask(self): + tokenizers = self.get_tokenizers(do_lower_case=False) + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + nodes, xpaths = self.get_nodes_and_xpaths() + # Testing single inputs + encoded_sequence = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False) + encoded_sequence_dict = tokenizer.encode_plus( + nodes, xpaths=xpaths, add_special_tokens=True, return_special_tokens_mask=True + ) + encoded_sequence_w_special = encoded_sequence_dict["input_ids"] + special_tokens_mask = encoded_sequence_dict["special_tokens_mask"] + self.assertEqual(len(special_tokens_mask), len(encoded_sequence_w_special)) + + filtered_sequence = [x for i, x in enumerate(encoded_sequence_w_special) if not special_tokens_mask[i]] + self.assertEqual(encoded_sequence, filtered_sequence) + + def test_save_and_load_tokenizer(self): + # safety check on max_len default value so we are sure the test works + tokenizers = self.get_tokenizers() + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + self.assertNotEqual(tokenizer.model_max_length, 42) + + # Now let's start the test + tokenizers = self.get_tokenizers() + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + # Isolate this from the other tests because we save additional tokens/etc + nodes, xpaths = self.get_nodes_and_xpaths() + tmpdirname = tempfile.mkdtemp() + + before_tokens = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False) + before_vocab = tokenizer.get_vocab() + tokenizer.save_pretrained(tmpdirname) + + after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) + after_tokens = after_tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False) + after_vocab = after_tokenizer.get_vocab() + self.assertListEqual(before_tokens, after_tokens) + self.assertDictEqual(before_vocab, after_vocab) + + shutil.rmtree(tmpdirname) + + def test_right_and_left_padding(self): + tokenizers = self.get_tokenizers(do_lower_case=False) + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + nodes, xpaths = self.get_nodes_and_xpaths() + sequence = "Sequence" + padding_size = 10 + + # check correct behaviour if no pad_token_id exists and add it eventually + self._check_no_pad_token_padding(tokenizer, sequence) + + padding_idx = tokenizer.pad_token_id + + # RIGHT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True + tokenizer.padding_side = "right" + encoded_sequence = tokenizer.encode(nodes, xpaths=xpaths) + sequence_length = len(encoded_sequence) + padded_sequence = tokenizer.encode( + nodes, xpaths=xpaths, max_length=sequence_length + padding_size, padding="max_length" + ) + padded_sequence_length = len(padded_sequence) + assert sequence_length + padding_size == padded_sequence_length + assert encoded_sequence + [padding_idx] * padding_size == padded_sequence + + # LEFT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True + tokenizer.padding_side = "left" + encoded_sequence = tokenizer.encode(nodes, xpaths=xpaths) + sequence_length = len(encoded_sequence) + padded_sequence = tokenizer.encode( + nodes, xpaths=xpaths, max_length=sequence_length + padding_size, padding="max_length" + ) + padded_sequence_length = len(padded_sequence) + assert sequence_length + padding_size == padded_sequence_length + assert [padding_idx] * padding_size + encoded_sequence == padded_sequence + + # RIGHT & LEFT PADDING - Check that nothing is done for 'longest' and 'no_padding' + encoded_sequence = tokenizer.encode(nodes, xpaths=xpaths) + sequence_length = len(encoded_sequence) + + tokenizer.padding_side = "right" + padded_sequence_right = tokenizer.encode(nodes, xpaths=xpaths, padding=True) + padded_sequence_right_length = len(padded_sequence_right) + assert sequence_length == padded_sequence_right_length + assert encoded_sequence == padded_sequence_right + + tokenizer.padding_side = "left" + padded_sequence_left = tokenizer.encode(nodes, xpaths=xpaths, padding="longest") + padded_sequence_left_length = len(padded_sequence_left) + assert sequence_length == padded_sequence_left_length + assert encoded_sequence == padded_sequence_left + + tokenizer.padding_side = "right" + padded_sequence_right = tokenizer.encode(nodes, xpaths=xpaths) + padded_sequence_right_length = len(padded_sequence_right) + assert sequence_length == padded_sequence_right_length + assert encoded_sequence == padded_sequence_right + + tokenizer.padding_side = "left" + padded_sequence_left = tokenizer.encode(nodes, xpaths=xpaths, padding=False) + padded_sequence_left_length = len(padded_sequence_left) + assert sequence_length == padded_sequence_left_length + assert encoded_sequence == padded_sequence_left + + def test_token_type_ids(self): + tokenizers = self.get_tokenizers() + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + # test 1: single sequence + nodes, xpaths = self.get_nodes_and_xpaths() + + output = tokenizer(nodes, xpaths=xpaths, return_token_type_ids=True) + + # Assert that the token type IDs have the same length as the input IDs + self.assertEqual(len(output["token_type_ids"]), len(output["input_ids"])) + + # Assert that the token type IDs have the same length as the attention mask + self.assertEqual(len(output["token_type_ids"]), len(output["attention_mask"])) + + self.assertIn(0, output["token_type_ids"]) + self.assertNotIn(1, output["token_type_ids"]) + + # test 2: two sequences (question + nodes) + question, nodes, xpaths = self.get_question_nodes_and_xpaths() + + output = tokenizer(question, nodes, xpaths, return_token_type_ids=True) + + # Assert that the token type IDs have the same length as the input IDs + self.assertEqual(len(output["token_type_ids"]), len(output["input_ids"])) + + # Assert that the token type IDs have the same length as the attention mask + self.assertEqual(len(output["token_type_ids"]), len(output["attention_mask"])) + + self.assertIn(0, output["token_type_ids"]) + + def test_offsets_mapping(self): + for tokenizer, pretrained_name, kwargs in self.tokenizers_list: + with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): + tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) + + text = ["a", "wonderful", "test"] + xpaths = ["html/body" for _ in range(len(text))] + + # No pair + tokens_with_offsets = tokenizer_r.encode_plus( + text, + xpaths=xpaths, + return_special_tokens_mask=True, + return_offsets_mapping=True, + add_special_tokens=True, + ) + added_tokens = tokenizer_r.num_special_tokens_to_add(False) + offsets = tokens_with_offsets["offset_mapping"] + + # Assert there is the same number of tokens and offsets + self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"])) + + # Assert there is online added_tokens special_tokens + self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens) + + # Pairs + text = "what's his name" + pair = ["a", "wonderful", "test"] + xpaths = ["html/body" for _ in range(len(pair))] + tokens_with_offsets = tokenizer_r.encode_plus( + text, + pair, + xpaths=xpaths, + return_special_tokens_mask=True, + return_offsets_mapping=True, + add_special_tokens=True, + ) + added_tokens = tokenizer_r.num_special_tokens_to_add(True) + offsets = tokens_with_offsets["offset_mapping"] + + # Assert there is the same number of tokens and offsets + self.assertEqual(len(offsets), len(tokens_with_offsets["input_ids"])) + + # Assert there is online added_tokens special_tokens + self.assertEqual(sum(tokens_with_offsets["special_tokens_mask"]), added_tokens) + + @require_torch + @slow + def test_torch_encode_plus_sent_to_model(self): + import torch + + from transformers import MODEL_MAPPING, TOKENIZER_MAPPING + + MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING) + + tokenizers = self.get_tokenizers(do_lower_case=False) + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + if tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING: + return + + config_class, model_class = MODEL_TOKENIZER_MAPPING[tokenizer.__class__] + config = config_class() + + if config.is_encoder_decoder or config.pad_token_id is None: + return + + model = model_class(config) + + # Make sure the model contains at least the full vocabulary size in its embedding matrix + is_using_common_embeddings = hasattr(model.get_input_embeddings(), "weight") + assert ( + (model.get_input_embeddings().weight.shape[0] >= len(tokenizer)) + if is_using_common_embeddings + else True + ) + + # Build sequence + nodes, xpaths = self.get_nodes_and_xpaths() + encoded_sequence = tokenizer.encode_plus(nodes, xpaths=xpaths, return_tensors="pt") + batch_encoded_sequence = tokenizer.batch_encode_plus( + [nodes, nodes], [xpaths, xpaths], return_tensors="pt" + ) + # This should not fail + + with torch.no_grad(): # saves some time + model(**encoded_sequence) + model(**batch_encoded_sequence) + + def test_rust_and_python_full_tokenizers(self): + if not self.test_rust_tokenizer: + return + + if not self.test_slow_tokenizer: + # as we don't have a slow version, we can't compare the outputs between slow and fast versions + return + + tokenizer = self.get_tokenizer() + rust_tokenizer = self.get_rust_tokenizer() + + nodes, xpaths = self.get_nodes_and_xpaths() + + ids = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False) + rust_ids = rust_tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False) + self.assertListEqual(ids, rust_ids) + + ids = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=True) + rust_ids = rust_tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=True) + self.assertListEqual(ids, rust_ids) + + def test_tokenization_python_rust_equals(self): + if not self.test_slow_tokenizer: + # as we don't have a slow version, we can't compare the outputs between slow and fast versions + return + + for tokenizer, pretrained_name, kwargs in self.tokenizers_list: + with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): + tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) + tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) + + nodes, xpaths = self.get_nodes_and_xpaths() + + # Ensure basic input match + input_p = tokenizer_p.encode_plus(nodes, xpaths=xpaths) + input_r = tokenizer_r.encode_plus(nodes, xpaths=xpaths) + + for key in filter( + lambda x: x + in ["input_ids", "token_type_ids", "attention_mask", "xpath_tags_seq", "xpath_subs_seq"], + input_p.keys(), + ): + self.assertSequenceEqual(input_p[key], input_r[key]) + + input_pairs_p = tokenizer_p.encode_plus(nodes, xpaths=xpaths) + input_pairs_r = tokenizer_r.encode_plus(nodes, xpaths=xpaths) + + for key in filter( + lambda x: x + in ["input_ids", "token_type_ids", "attention_mask", "xpath_tags_seq", "xpath_subs_seq"], + input_p.keys(), + ): + self.assertSequenceEqual(input_pairs_p[key], input_pairs_r[key]) + + nodes = ["hello" for _ in range(1000)] + xpaths = ["html/body" for _ in range(1000)] + + # Ensure truncation match + input_p = tokenizer_p.encode_plus(nodes, xpaths=xpaths, max_length=512, truncation=True) + input_r = tokenizer_r.encode_plus(nodes, xpaths=xpaths, max_length=512, truncation=True) + + for key in filter( + lambda x: x + in ["input_ids", "token_type_ids", "attention_mask", "xpath_tags_seq", "xpath_subs_seq"], + input_p.keys(), + ): + self.assertSequenceEqual(input_p[key], input_r[key]) + + # Ensure truncation with stride match + input_p = tokenizer_p.encode_plus( + nodes, xpaths=xpaths, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True + ) + input_r = tokenizer_r.encode_plus( + nodes, xpaths=xpaths, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True + ) + + for key in filter( + lambda x: x + in ["input_ids", "token_type_ids", "attention_mask", "xpath_tags_seq", "xpath_subs_seq"], + input_p.keys(), + ): + self.assertSequenceEqual(input_p[key], input_r[key][0]) + + def test_embeded_special_tokens(self): + if not self.test_slow_tokenizer: + # as we don't have a slow version, we can't compare the outputs between slow and fast versions + return + + for tokenizer, pretrained_name, kwargs in self.tokenizers_list: + with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): + tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) + tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) + nodes, xpaths = self.get_nodes_and_xpaths() + tokens_r = tokenizer_r.encode_plus(nodes, xpaths=xpaths, add_special_tokens=True) + tokens_p = tokenizer_p.encode_plus(nodes, xpaths=xpaths, add_special_tokens=True) + + for key in tokens_p.keys(): + self.assertEqual(tokens_r[key], tokens_p[key]) + + if "token_type_ids" in tokens_r: + self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"])) + + tokens_r = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"]) + tokens_p = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"]) + self.assertSequenceEqual(tokens_r, tokens_p) + + def test_compare_add_special_tokens(self): + for tokenizer, pretrained_name, kwargs in self.tokenizers_list: + with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): + tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) + + simple_num_special_tokens_to_add = tokenizer_r.num_special_tokens_to_add(pair=False) + + nodes, xpaths = self.get_nodes_and_xpaths() + # tokenize() + no_special_tokens = tokenizer_r.tokenize(" ".join(nodes), add_special_tokens=False) + with_special_tokens = tokenizer_r.tokenize(" ".join(nodes), add_special_tokens=True) + self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add) + + # encode() + no_special_tokens = tokenizer_r.encode(nodes, xpaths=xpaths, add_special_tokens=False) + with_special_tokens = tokenizer_r.encode(nodes, xpaths=xpaths, add_special_tokens=True) + self.assertEqual(len(no_special_tokens), len(with_special_tokens) - simple_num_special_tokens_to_add) + + # encode_plus() + no_special_tokens = tokenizer_r.encode_plus(nodes, xpaths=xpaths, add_special_tokens=False) + with_special_tokens = tokenizer_r.encode_plus(nodes, xpaths=xpaths, add_special_tokens=True) + for key in no_special_tokens.keys(): + self.assertEqual( + len(no_special_tokens[key]), + len(with_special_tokens[key]) - simple_num_special_tokens_to_add, + ) + + # # batch_encode_plus + nodes, xpaths = self.get_nodes_and_xpaths_batch() + + no_special_tokens = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths, add_special_tokens=False) + with_special_tokens = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths, add_special_tokens=True) + for key in no_special_tokens.keys(): + for i_no, i_with in zip(no_special_tokens[key], with_special_tokens[key]): + self.assertEqual(len(i_no), len(i_with) - simple_num_special_tokens_to_add) + + @slow + def test_markuplm_truncation_integration_test(self): + nodes, xpaths = self.get_nodes_and_xpaths() + + tokenizer = MarkupLMTokenizer.from_pretrained("microsoft/markuplm-base", model_max_length=512) + + for i in range(12, 512): + new_encoded_inputs = tokenizer.encode(nodes, xpaths=xpaths, max_length=i, truncation=True) + + # Ensure that the input IDs are less than the max length defined. + self.assertLessEqual(len(new_encoded_inputs), i) + + tokenizer.model_max_length = 20 + new_encoded_inputs = tokenizer.encode(nodes, xpaths=xpaths, truncation=True) + dropped_encoded_inputs = tokenizer.encode(nodes, xpaths=xpaths, truncation=True) + + # Ensure that the input IDs are still truncated when no max_length is specified + self.assertListEqual(new_encoded_inputs, dropped_encoded_inputs) + self.assertLessEqual(len(new_encoded_inputs), 20) + + @is_pt_tf_cross_test + def test_batch_encode_plus_tensors(self): + tokenizers = self.get_tokenizers(do_lower_case=False) + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + nodes, xpaths = self.get_nodes_and_xpaths_batch() + + # A Tensor cannot be build by sequences which are not the same size + self.assertRaises(ValueError, tokenizer.batch_encode_plus, nodes, xpaths=xpaths, return_tensors="pt") + self.assertRaises(ValueError, tokenizer.batch_encode_plus, nodes, xpaths=xpaths, return_tensors="tf") + + if tokenizer.pad_token_id is None: + self.assertRaises( + ValueError, + tokenizer.batch_encode_plus, + nodes, + xpaths=xpaths, + padding=True, + return_tensors="pt", + ) + self.assertRaises( + ValueError, + tokenizer.batch_encode_plus, + nodes, + xpaths=xpaths, + padding="longest", + return_tensors="tf", + ) + else: + pytorch_tensor = tokenizer.batch_encode_plus( + nodes, xpaths=xpaths, padding=True, return_tensors="pt" + ) + tensorflow_tensor = tokenizer.batch_encode_plus( + nodes, xpaths=xpaths, padding="longest", return_tensors="tf" + ) + encoded_sequences = tokenizer.batch_encode_plus(nodes, xpaths=xpaths, padding=True) + + for key in encoded_sequences.keys(): + pytorch_value = pytorch_tensor[key].tolist() + tensorflow_value = tensorflow_tensor[key].numpy().tolist() + encoded_value = encoded_sequences[key] + + self.assertEqual(pytorch_value, tensorflow_value, encoded_value) + + def test_sequence_ids(self): + tokenizers = self.get_tokenizers() + for tokenizer in tokenizers: + if not tokenizer.is_fast: + continue + with self.subTest(f"{tokenizer.__class__.__name__}"): + seq_0 = "Test this method." + seq_1 = ["With", "these", "inputs."] + xpaths = ["html/body" for _ in range(len(seq_1))] + + # We want to have sequence 0 and sequence 1 are tagged + # respectively with 0 and 1 token_ids + # (regardless of whether the model use token type ids) + # We use this assumption in the QA pipeline among other place + output = tokenizer(seq_0.split(), xpaths=xpaths) + self.assertIn(0, output.sequence_ids()) + + output = tokenizer(seq_0, seq_1, xpaths=xpaths) + self.assertIn(0, output.sequence_ids()) + self.assertIn(1, output.sequence_ids()) + + if tokenizer.num_special_tokens_to_add(pair=True): + self.assertIn(None, output.sequence_ids()) + + def test_special_tokens_initialization(self): + for tokenizer, pretrained_name, kwargs in self.tokenizers_list: + with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): + added_tokens = [AddedToken("", lstrip=True)] + + tokenizer_r = self.rust_tokenizer_class.from_pretrained( + pretrained_name, additional_special_tokens=added_tokens, **kwargs + ) + nodes = "Hey this is a token".split() + xpaths = ["html/body" for _ in range(len(nodes))] + r_output = tokenizer_r.encode(nodes, xpaths=xpaths) + + special_token_id = tokenizer_r.encode([""], xpaths=["html/body"], add_special_tokens=False)[0] + + self.assertTrue(special_token_id in r_output) + + if self.test_slow_tokenizer: + tokenizer_cr = self.rust_tokenizer_class.from_pretrained( + pretrained_name, additional_special_tokens=added_tokens, **kwargs + ) + tokenizer_p = self.tokenizer_class.from_pretrained( + pretrained_name, additional_special_tokens=added_tokens, **kwargs + ) + + nodes = "Hey this is a token".split() + xpaths = ["html/body" for _ in range(len(nodes))] + + p_output = tokenizer_p.encode(nodes, xpaths=xpaths) + cr_output = tokenizer_cr.encode(nodes, xpaths=xpaths) + + self.assertEqual(p_output, r_output) + self.assertEqual(cr_output, r_output) + self.assertTrue(special_token_id in p_output) + self.assertTrue(special_token_id in cr_output) + + def test_training_new_tokenizer(self): + # This feature only exists for fast tokenizers + if not self.test_rust_tokenizer: + return + + tokenizer = self.get_rust_tokenizer() + new_tokenizer = tokenizer.train_new_from_iterator(SMALL_TRAINING_CORPUS, 100) + + # Test we can use the new tokenizer with something not seen during training + text = [["this", "is", "the"], ["how", "are", "you"]] + xpaths = [["html/body"] * 3, ["html/body"] * 3] + inputs = new_tokenizer(text, xpaths=xpaths) + self.assertEqual(len(inputs["input_ids"]), 2) + decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True) + expected_result = ( # original expected result "this is the" seems contradicts to roberta-based tokenizer + "thisisthe" + ) + + if tokenizer.backend_tokenizer.normalizer is not None: + expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result) + self.assertEqual(expected_result, decoded_input) + + # We check that the parameters of the tokenizer remained the same + # Check we have the same number of added_tokens for both pair and non-pair inputs. + self.assertEqual(tokenizer.num_special_tokens_to_add(False), new_tokenizer.num_special_tokens_to_add(False)) + self.assertEqual(tokenizer.num_special_tokens_to_add(True), new_tokenizer.num_special_tokens_to_add(True)) + + # Check we have the correct max_length for both pair and non-pair inputs. + self.assertEqual(tokenizer.max_len_single_sentence, new_tokenizer.max_len_single_sentence) + self.assertEqual(tokenizer.max_len_sentences_pair, new_tokenizer.max_len_sentences_pair) + + # Assert the set of special tokens match as we didn't ask to change them + self.assertSequenceEqual( + tokenizer.all_special_tokens_extended, + new_tokenizer.all_special_tokens_extended, + ) + + self.assertDictEqual(tokenizer.special_tokens_map, new_tokenizer.special_tokens_map) + + def test_training_new_tokenizer_with_special_tokens_change(self): + # This feature only exists for fast tokenizers + if not self.test_rust_tokenizer: + return + + tokenizer = self.get_rust_tokenizer() + # Test with a special tokens map + class_signature = inspect.signature(tokenizer.__class__) + if "cls_token" in class_signature.parameters: + new_tokenizer = tokenizer.train_new_from_iterator( + SMALL_TRAINING_CORPUS, 100, special_tokens_map={tokenizer.cls_token: ""} + ) + cls_id = new_tokenizer.get_vocab()[""] + self.assertEqual(new_tokenizer.cls_token, "") + self.assertEqual(new_tokenizer.cls_token_id, cls_id) + + # Create a new mapping from the special tokens defined in the original tokenizer + special_tokens_list = SpecialTokensMixin.SPECIAL_TOKENS_ATTRIBUTES.copy() + special_tokens_list.remove("additional_special_tokens") + special_tokens_map = {} + for token in special_tokens_list: + # Get the private one to avoid unnecessary warnings. + if getattr(tokenizer, f"_{token}") is not None: + special_token = getattr(tokenizer, token) + special_tokens_map[special_token] = f"{special_token}a" + + # Train new tokenizer + new_tokenizer = tokenizer.train_new_from_iterator( + SMALL_TRAINING_CORPUS, 100, special_tokens_map=special_tokens_map + ) + + # Check the changes + for token in special_tokens_list: + # Get the private one to avoid unnecessary warnings. + if getattr(tokenizer, f"_{token}") is None: + continue + special_token = getattr(tokenizer, token) + if special_token in special_tokens_map: + new_special_token = getattr(new_tokenizer, token) + self.assertEqual(special_tokens_map[special_token], new_special_token) + + new_id = new_tokenizer.get_vocab()[new_special_token] + self.assertEqual(getattr(new_tokenizer, f"{token}_id"), new_id) + + # Check if the AddedToken / string format has been kept + for special_token in tokenizer.all_special_tokens_extended: + if isinstance(special_token, AddedToken) and special_token.content not in special_tokens_map: + # The special token must appear identically in the list of the new tokenizer. + self.assertTrue( + special_token in new_tokenizer.all_special_tokens_extended, + f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}", + ) + elif isinstance(special_token, AddedToken): + # The special token must appear in the list of the new tokenizer as an object of type AddedToken with + # the same parameters as the old AddedToken except the content that the user has requested to change. + special_token_str = special_token.content + new_special_token_str = special_tokens_map[special_token_str] + + find = False + for candidate in new_tokenizer.all_special_tokens_extended: + if ( + isinstance(candidate, AddedToken) + and candidate.content == new_special_token_str + and candidate.lstrip == special_token.lstrip + and candidate.rstrip == special_token.rstrip + and candidate.normalized == special_token.normalized + and candidate.single_word == special_token.single_word + ): + find = True + break + self.assertTrue( + find, + f"'{new_special_token_str}' doesn't appear in the list " + f"'{new_tokenizer.all_special_tokens_extended}' as an AddedToken with the same parameters as " + f"'{special_token}' in the list {tokenizer.all_special_tokens_extended}", + ) + elif special_token not in special_tokens_map: + # The special token must appear identically in the list of the new tokenizer. + self.assertTrue( + special_token in new_tokenizer.all_special_tokens_extended, + f"'{special_token}' should be in {new_tokenizer.all_special_tokens_extended}", + ) + + else: + # The special token must appear in the list of the new tokenizer as an object of type string. + self.assertTrue(special_tokens_map[special_token] in new_tokenizer.all_special_tokens_extended) + + # Test we can use the new tokenizer with something not seen during training + nodes = [["this", "is"], ["hello", "🤗"]] + xpaths = [["html/body"] * 2, ["html/body"] * 2] + inputs = new_tokenizer(nodes, xpaths=xpaths) + self.assertEqual(len(inputs["input_ids"]), 2) + decoded_input = new_tokenizer.decode(inputs["input_ids"][0], skip_special_tokens=True) + expected_result = "thisis" # same as line 1399 + + if tokenizer.backend_tokenizer.normalizer is not None: + expected_result = tokenizer.backend_tokenizer.normalizer.normalize_str(expected_result) + self.assertEqual(expected_result, decoded_input) + + def test_prepare_for_model(self): + tokenizers = self.get_tokenizers(do_lower_case=False) + for tokenizer in tokenizers: + # only test prepare_for_model for the slow tokenizer + if tokenizer.__class__.__name__ == "MarkupLMTokenizerFast": + continue + with self.subTest(f"{tokenizer.__class__.__name__}"): + nodes, xpaths = self.get_nodes_and_xpaths() + prepared_input_dict = tokenizer.prepare_for_model(nodes, xpaths=xpaths, add_special_tokens=True) + + input_dict = tokenizer.encode_plus(nodes, xpaths=xpaths, add_special_tokens=True) + + self.assertEqual(input_dict, prepared_input_dict) + + def test_padding_different_model_input_name(self): + if not self.test_slow_tokenizer: + # as we don't have a slow version, we can't compare the outputs between slow and fast versions + return + + for tokenizer, pretrained_name, kwargs in self.tokenizers_list: + with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): + tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) + tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) + self.assertEqual(tokenizer_p.pad_token_id, tokenizer_r.pad_token_id) + pad_token_id = tokenizer_p.pad_token_id + + nodes, xpaths = self.get_nodes_and_xpaths_batch() + + input_r = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths) + input_p = tokenizer_r.batch_encode_plus(nodes, xpaths=xpaths) + + # rename encoded batch to "inputs" + input_r["inputs"] = input_r[tokenizer_r.model_input_names[0]] + del input_r[tokenizer_r.model_input_names[0]] + + input_p["inputs"] = input_p[tokenizer_p.model_input_names[0]] + del input_p[tokenizer_p.model_input_names[0]] + + # Renaming `input_ids` to `inputs` + tokenizer_r.model_input_names = ["inputs"] + tokenizer_r.model_input_names[1:] + tokenizer_p.model_input_names = ["inputs"] + tokenizer_p.model_input_names[1:] + + input_r = tokenizer_r.pad(input_r, padding="longest") + input_p = tokenizer_r.pad(input_p, padding="longest") + + max_length = len(input_p["inputs"][0]) + self.assert_batch_padded_input_match( + input_r, input_p, max_length, pad_token_id, model_main_input_name="inputs" + ) + + def test_batch_encode_dynamic_overflowing(self): + """ + When calling batch_encode with multiple sequences, it can return different number of + overflowing encoding for each sequence: + [ + Sequence 1: [Encoding 1, Encoding 2], + Sequence 2: [Encoding 1], + Sequence 3: [Encoding 1, Encoding 2, ... Encoding N] + ] + This needs to be padded so that it can represented as a tensor + """ + for tokenizer, pretrained_name, kwargs in self.tokenizers_list: + tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) + + with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name}, {tokenizer.__class__.__name__})"): + if is_torch_available(): + returned_tensor = "pt" + elif is_tf_available(): + returned_tensor = "tf" + else: + returned_tensor = "jax" + + # Single example + nodes, xpaths = self.get_nodes_and_xpaths() + tokens = tokenizer.encode_plus( + nodes, + xpaths=xpaths, + max_length=1, + padding=True, + truncation=True, + return_tensors=returned_tensor, + return_overflowing_tokens=True, + ) + + for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()): + if "xpath" not in key: + self.assertEqual(len(tokens[key].shape), 2) + else: + self.assertEqual(len(tokens[key].shape), 3) + + # Batch of examples + # For these 2 examples, 3 training examples will be created + nodes, xpaths = self.get_nodes_and_xpaths_batch() + tokens = tokenizer.batch_encode_plus( + nodes, + xpaths=xpaths, + max_length=6, + padding=True, + truncation="only_first", + return_tensors=returned_tensor, + return_overflowing_tokens=True, + ) + + for key in filter(lambda x: "overflow_to_sample_mapping" not in x, tokens.keys()): + if "xpath" not in key: + self.assertEqual(len(tokens[key].shape), 2) + self.assertEqual(tokens[key].shape[-1], 6) + else: + self.assertEqual(len(tokens[key].shape), 3) + self.assertEqual(tokens[key].shape[-2], 6) + + @unittest.skip("TO DO: overwrite this very extensive test.") + def test_alignement_methods(self): + pass + + def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5): + toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))] + toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks)) + toks = list( + filter( + lambda t: [t[0]] + == tokenizer.encode(t[1].split(" "), xpaths=len(t[1]) * ["html/body"], add_special_tokens=False), + toks, + ) + ) + if max_length is not None and len(toks) > max_length: + toks = toks[:max_length] + if min_length is not None and len(toks) < min_length and len(toks) > 0: + while len(toks) < min_length: + toks = toks + toks + # toks_str = [t[1] for t in toks] + toks_ids = [t[0] for t in toks] + + # Ensure consistency + output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False) + # an extra blank will cause inconsistency: ["a","b",] & "a b" + """ + if " " not in output_txt and len(toks_ids) > 1: + output_txt = ( + tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False) + + " " + + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False) + ) + """ + if with_prefix_space: + output_txt = " " + output_txt + nodes = output_txt.split(" ") + xpaths = ["html/body" for i in range(len(nodes))] + output_ids = tokenizer.encode(nodes, xpaths=xpaths, add_special_tokens=False) + return nodes, xpaths, output_ids + + def test_maximum_encoding_length_pair_input(self): + # slow part fixed, fast part not + tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100) + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + # Build a sequence from our model's vocabulary + stride = 2 + seq_0, xpaths_0, ids = self.get_clean_sequence(tokenizer, max_length=20) + question_0 = " ".join(map(str, seq_0)) + if len(ids) <= 2 + stride: + seq_0 = (seq_0 + " ") * (2 + stride) + ids = None + + seq0_tokens = tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False) + self.assertGreater(len(seq0_tokens["input_ids"]), 2 + stride) + question_1 = "This is another sentence to be encoded." + seq_1 = ["hello", "world"] + xpaths_1 = ["html/body" for i in range(len(seq_1))] + seq1_tokens = tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False) + if abs(len(seq0_tokens["input_ids"]) - len(seq1_tokens["input_ids"])) <= 2: + seq1_tokens_input_ids = seq1_tokens["input_ids"] + seq1_tokens["input_ids"] + seq_1 = tokenizer.decode(seq1_tokens_input_ids, clean_up_tokenization_spaces=False) + seq_1 = seq_1.split(" ") + xpaths_1 = ["html/body" for i in range(len(seq_1))] + seq1_tokens = tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False) + + self.assertGreater(len(seq1_tokens["input_ids"]), 2 + stride) + + smallest = ( + seq1_tokens["input_ids"] + if len(seq0_tokens["input_ids"]) > len(seq1_tokens["input_ids"]) + else seq0_tokens["input_ids"] + ) + + # We are not using the special tokens - a bit too hard to test all the tokenizers with this + # TODO try this again later + sequence = tokenizer(question_0, seq_1, xpaths=xpaths_1, add_special_tokens=False) + + # Test with max model input length + model_max_length = tokenizer.model_max_length + self.assertEqual(model_max_length, 100) + seq_2 = seq_0 * model_max_length + question_2 = " ".join(map(str, seq_2)) + xpaths_2 = xpaths_0 * model_max_length + # assertgreater -> assertgreaterequal + self.assertGreaterEqual(len(seq_2), model_max_length) + + sequence1 = tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False) + total_length1 = len(sequence1["input_ids"]) + sequence2 = tokenizer(question_2, seq_1, xpaths=xpaths_1, add_special_tokens=False) + total_length2 = len(sequence2["input_ids"]) + self.assertLess(total_length1, model_max_length, "Issue with the testing sequence, please update it.") + self.assertGreater( + total_length2, model_max_length, "Issue with the testing sequence, please update it." + ) + + # Simple + padding_strategies = ( + [False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False] + ) + for padding_state in padding_strategies: + with self.subTest(f"{tokenizer.__class__.__name__} Padding: {padding_state}"): + for truncation_state in [True, "longest_first", "only_first"]: + with self.subTest(f"{tokenizer.__class__.__name__} Truncation: {truncation_state}"): + output = tokenizer( + question_2, + seq_1, + xpaths=xpaths_1, + padding=padding_state, + truncation=truncation_state, + ) + self.assertEqual(len(output["input_ids"]), model_max_length) + self.assertEqual(len(output["xpath_tags_seq"]), model_max_length) + self.assertEqual(len(output["xpath_subs_seq"]), model_max_length) + + output = tokenizer( + [question_2], + [seq_1], + xpaths=[xpaths_1], + padding=padding_state, + truncation=truncation_state, + ) + self.assertEqual(len(output["input_ids"][0]), model_max_length) + self.assertEqual(len(output["xpath_tags_seq"][0]), model_max_length) + self.assertEqual(len(output["xpath_subs_seq"][0]), model_max_length) + + # Simple + output = tokenizer( + question_1, seq_2, xpaths=xpaths_2, padding=padding_state, truncation="only_second" + ) + self.assertEqual(len(output["input_ids"]), model_max_length) + self.assertEqual(len(output["xpath_tags_seq"]), model_max_length) + self.assertEqual(len(output["xpath_subs_seq"]), model_max_length) + + output = tokenizer( + [question_1], [seq_2], xpaths=[xpaths_2], padding=padding_state, truncation="only_second" + ) + self.assertEqual(len(output["input_ids"][0]), model_max_length) + self.assertEqual(len(output["xpath_tags_seq"][0]), model_max_length) + self.assertEqual(len(output["xpath_subs_seq"][0]), model_max_length) + + # Simple with no truncation + # Reset warnings + tokenizer.deprecation_warnings = {} + with self.assertLogs("transformers", level="WARNING") as cm: + output = tokenizer( + question_1, seq_2, xpaths=xpaths_2, padding=padding_state, truncation=False + ) + self.assertNotEqual(len(output["input_ids"]), model_max_length) + self.assertNotEqual(len(output["xpath_tags_seq"]), model_max_length) + self.assertNotEqual(len(output["xpath_subs_seq"]), model_max_length) + self.assertEqual(len(cm.records), 1) + self.assertTrue( + cm.records[0].message.startswith( + "Token indices sequence length is longer than the specified maximum sequence length" + " for this model" + ) + ) + + tokenizer.deprecation_warnings = {} + with self.assertLogs("transformers", level="WARNING") as cm: + output = tokenizer( + [question_1], [seq_2], xpaths=[xpaths_2], padding=padding_state, truncation=False + ) + self.assertNotEqual(len(output["input_ids"][0]), model_max_length) + self.assertNotEqual(len(output["xpath_tags_seq"][0]), model_max_length) + self.assertNotEqual(len(output["xpath_subs_seq"][0]), model_max_length) + self.assertEqual(len(cm.records), 1) + self.assertTrue( + cm.records[0].message.startswith( + "Token indices sequence length is longer than the specified maximum sequence length" + " for this model" + ) + ) + # Check the order of Sequence of input ids, overflowing tokens and xpath_tags_seq sequence with truncation + truncated_first_sequence = ( + tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False)["input_ids"][:-2] + + tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)["input_ids"] + ) + truncated_second_sequence = ( + tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False)["input_ids"] + + tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)["input_ids"][:-2] + ) + truncated_longest_sequence = ( + truncated_first_sequence if len(seq0_tokens) > len(seq1_tokens) else truncated_second_sequence + ) + + overflow_first_sequence = ( + tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False)["input_ids"][-(2 + stride) :] + + tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)["input_ids"] + ) + overflow_second_sequence = ( + tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False)["input_ids"] + + tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)["input_ids"][-(2 + stride) :] + ) + overflow_longest_sequence = ( + overflow_first_sequence if len(seq0_tokens) > len(seq1_tokens) else overflow_second_sequence + ) + + xpath_tags_seq_first = [[5] * 50] * ( + len(tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False)["input_ids"]) - 2 + ) + xpath_tags_seq_first_sequence = ( + xpath_tags_seq_first + + tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)["xpath_tags_seq"] + ) + overflowing_token_xpath_tags_seq_first_sequence_slow = [[5] * 50] * (2 + stride) + overflowing_token_xpath_tags_seq_first_sequence_fast = [[5] * 50] * (2 + stride) + tokenizer( + seq_1, xpaths=xpaths_1, add_special_tokens=False + )["xpath_tags_seq"] + + xpath_tags_seq_second = [[5] * 50] * len( + tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False)["input_ids"] + ) + xpath_tags_seq_second_sequence = ( + xpath_tags_seq_second + + tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)["xpath_tags_seq"][:-2] + ) + overflowing_token_xpath_tags_seq_second_sequence_slow = tokenizer( + seq_1, xpaths=xpaths_1, add_special_tokens=False + )["xpath_tags_seq"][-(2 + stride) :] + overflowing_token_xpath_tags_seq_second_sequence_fast = [[5] * 50] * len( + tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False)["input_ids"] + ) + tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False)["xpath_tags_seq"][-(2 + stride) :] + + xpath_tags_seq_longest_sequence = ( + xpath_tags_seq_first_sequence + if len(seq0_tokens) > len(seq1_tokens) + else xpath_tags_seq_second_sequence + ) + overflowing_token_xpath_tags_seq_longest_sequence_fast = ( + overflowing_token_xpath_tags_seq_first_sequence_fast + if len(seq0_tokens) > len(seq1_tokens) + else overflowing_token_xpath_tags_seq_second_sequence_fast + ) + + # Overflowing tokens are handled quite differently in slow and fast tokenizers + if isinstance(tokenizer, MarkupLMTokenizerFast): + information = tokenizer( + question_0, + seq_1, + xpaths=xpaths_1, + max_length=len(sequence["input_ids"]) - 2, + add_special_tokens=False, + stride=stride, + truncation="longest_first", + return_overflowing_tokens=True, + # add_prefix_space=False, + ) + truncated_sequence = information["input_ids"][0] + overflowing_tokens = information["input_ids"][1] + xpath_tags_seq = information["xpath_tags_seq"][0] + overflowing_xpath_tags_seq = information["xpath_tags_seq"][1] + self.assertEqual(len(information["input_ids"]), 2) + + self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2) + self.assertEqual(truncated_sequence, truncated_longest_sequence) + + self.assertEqual(len(overflowing_tokens), 2 + stride + len(smallest)) + self.assertEqual(overflowing_tokens, overflow_longest_sequence) + self.assertEqual(xpath_tags_seq, xpath_tags_seq_longest_sequence) + + self.assertEqual(len(overflowing_xpath_tags_seq), 2 + stride + len(smallest)) + self.assertEqual( + overflowing_xpath_tags_seq, overflowing_token_xpath_tags_seq_longest_sequence_fast + ) + else: + # No overflowing tokens when using 'longest' in python tokenizers + with self.assertRaises(ValueError) as context: + information = tokenizer( + question_0, + seq_1, + xpaths=xpaths_1, + max_length=len(sequence["input_ids"]) - 2, + add_special_tokens=False, + stride=stride, + truncation="longest_first", + return_overflowing_tokens=True, + # add_prefix_space=False, + ) + + self.assertTrue( + context.exception.args[0].startswith( + "Not possible to return overflowing tokens for pair of sequences with the " + "`longest_first`. Please select another truncation strategy than `longest_first`, " + "for instance `only_second` or `only_first`." + ) + ) + + # Overflowing tokens are handled quite differently in slow and fast tokenizers + if isinstance(tokenizer, MarkupLMTokenizerFast): + information = tokenizer( + question_0, + seq_1, + xpaths=xpaths_1, + max_length=len(sequence["input_ids"]) - 2, + add_special_tokens=False, + stride=stride, + truncation=True, + return_overflowing_tokens=True, + ) + truncated_sequence = information["input_ids"][0] + overflowing_tokens = information["input_ids"][1] + xpath_tags_seq = information["xpath_tags_seq"][0] + overflowing_xpath_tags_seq = information["xpath_tags_seq"][1] + self.assertEqual(len(information["input_ids"]), 2) + + self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2) + self.assertEqual(truncated_sequence, truncated_longest_sequence) + + self.assertEqual(len(overflowing_tokens), 2 + stride + len(smallest)) + self.assertEqual(overflowing_tokens, overflow_longest_sequence) + self.assertEqual(xpath_tags_seq, xpath_tags_seq_longest_sequence) + self.assertEqual( + overflowing_xpath_tags_seq, overflowing_token_xpath_tags_seq_longest_sequence_fast + ) + else: + # No overflowing tokens when using 'longest' in python tokenizers + with self.assertRaises(ValueError) as context: + information = tokenizer( + question_0, + seq_1, + xpaths=xpaths_1, + max_length=len(sequence["input_ids"]) - 2, + add_special_tokens=False, + stride=stride, + truncation=True, + return_overflowing_tokens=True, + ) + + self.assertTrue( + context.exception.args[0].startswith( + "Not possible to return overflowing tokens for pair of sequences with the " + "`longest_first`. Please select another truncation strategy than `longest_first`, " + "for instance `only_second` or `only_first`." + ) + ) + + information_first_truncated = tokenizer( + question_0, + seq_1, + xpaths=xpaths_1, + max_length=len(sequence["input_ids"]) - 2, + add_special_tokens=False, + stride=stride, + truncation="only_first", + return_overflowing_tokens=True, + ) + # Overflowing tokens are handled quite differently in slow and fast tokenizers + if isinstance(tokenizer, MarkupLMTokenizerFast): + truncated_sequence = information_first_truncated["input_ids"][0] + overflowing_tokens = information_first_truncated["input_ids"][1] + xpath_tags_seq = information_first_truncated["xpath_tags_seq"][0] + overflowing_xpath_tags_seq = information_first_truncated["xpath_tags_seq"][1] + self.assertEqual(len(information_first_truncated["input_ids"]), 2) + + self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2) + self.assertEqual(truncated_sequence, truncated_first_sequence) + + self.assertEqual(len(overflowing_tokens), 2 + stride + len(seq1_tokens["input_ids"])) + self.assertEqual(overflowing_tokens, overflow_first_sequence) + self.assertEqual(xpath_tags_seq, xpath_tags_seq_first_sequence) + # ISSUE HAPPENS HERE ↓ + self.assertEqual(overflowing_xpath_tags_seq, overflowing_token_xpath_tags_seq_first_sequence_fast) + else: + truncated_sequence = information_first_truncated["input_ids"] + overflowing_tokens = information_first_truncated["overflowing_tokens"] + overflowing_xpath_tags_seq = information_first_truncated["overflowing_xpath_tags_seq"] + xpath_tags_seq = information_first_truncated["xpath_tags_seq"] + + self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2) + self.assertEqual(truncated_sequence, truncated_first_sequence) + + self.assertEqual(len(overflowing_tokens), 2 + stride) + self.assertEqual(overflowing_tokens, seq0_tokens["input_ids"][-(2 + stride) :]) + self.assertEqual(xpath_tags_seq, xpath_tags_seq_first_sequence) + self.assertEqual(overflowing_xpath_tags_seq, overflowing_token_xpath_tags_seq_first_sequence_slow) + + information_second_truncated = tokenizer( + question_0, + seq_1, + xpaths=xpaths_1, + max_length=len(sequence["input_ids"]) - 2, + add_special_tokens=False, + stride=stride, + truncation="only_second", + return_overflowing_tokens=True, + # add_prefix_space=False, + ) + # Overflowing tokens are handled quite differently in slow and fast tokenizers + if isinstance(tokenizer, MarkupLMTokenizerFast): + truncated_sequence = information_second_truncated["input_ids"][0] + overflowing_tokens = information_second_truncated["input_ids"][1] + xpath_tags_seq = information_second_truncated["xpath_tags_seq"][0] + overflowing_xpath_tags_seq = information_second_truncated["xpath_tags_seq"][1] + + self.assertEqual(len(information_second_truncated["input_ids"]), 2) + + self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2) + self.assertEqual(truncated_sequence, truncated_second_sequence) + + self.assertEqual(len(overflowing_tokens), 2 + stride + len(seq0_tokens["input_ids"])) + self.assertEqual(overflowing_tokens, overflow_second_sequence) + self.assertEqual(xpath_tags_seq, xpath_tags_seq_second_sequence) + self.assertEqual(overflowing_xpath_tags_seq, overflowing_token_xpath_tags_seq_second_sequence_fast) + else: + truncated_sequence = information_second_truncated["input_ids"] + overflowing_tokens = information_second_truncated["overflowing_tokens"] + xpath_tags_seq = information_second_truncated["xpath_tags_seq"] + overflowing_xpath_tags_seq = information_second_truncated["overflowing_xpath_tags_seq"] + + self.assertEqual(len(truncated_sequence), len(sequence["input_ids"]) - 2) + self.assertEqual(truncated_sequence, truncated_second_sequence) + + self.assertEqual(len(overflowing_tokens), 2 + stride) + self.assertEqual(overflowing_tokens, seq1_tokens["input_ids"][-(2 + stride) :]) + self.assertEqual(xpath_tags_seq, xpath_tags_seq_second_sequence) + self.assertEqual(overflowing_xpath_tags_seq, overflowing_token_xpath_tags_seq_second_sequence_slow) + + def test_maximum_encoding_length_single_input(self): + tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100) + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + seq_0, xpaths_0, ids = self.get_clean_sequence(tokenizer, max_length=20) + + sequence = tokenizer(seq_0, xpaths=xpaths_0, add_special_tokens=False) + total_length = len(sequence["input_ids"]) + + self.assertGreater(total_length, 4, "Issue with the testing sequence, please update it it's too short") + + # Test with max model input length + model_max_length = tokenizer.model_max_length + self.assertEqual(model_max_length, 100) + seq_1 = seq_0 * model_max_length + xpaths_1 = xpaths_0 * model_max_length + sequence1 = tokenizer(seq_1, xpaths=xpaths_1, add_special_tokens=False) + total_length1 = len(sequence1["input_ids"]) + self.assertGreater( + total_length1, model_max_length, "Issue with the testing sequence, please update it it's too short" + ) + + # Simple + padding_strategies = ( + [False, True, "longest"] if tokenizer.pad_token and tokenizer.pad_token_id >= 0 else [False] + ) + for padding_state in padding_strategies: + with self.subTest(f"Padding: {padding_state}"): + for truncation_state in [True, "longest_first", "only_first"]: + with self.subTest(f"Truncation: {truncation_state}"): + output = tokenizer( + seq_1, + xpaths=xpaths_1, + padding=padding_state, + truncation=truncation_state, + ) + self.assertEqual(len(output["input_ids"]), model_max_length) + self.assertEqual(len(output["xpath_tags_seq"]), model_max_length) + self.assertEqual(len(output["xpath_subs_seq"]), model_max_length) + + output = tokenizer( + [seq_1], + xpaths=[xpaths_1], + padding=padding_state, + truncation=truncation_state, + ) + self.assertEqual(len(output["input_ids"][0]), model_max_length) + self.assertEqual(len(output["xpath_tags_seq"][0]), model_max_length) + self.assertEqual(len(output["xpath_subs_seq"][0]), model_max_length) + + # Simple with no truncation + # Reset warnings + tokenizer.deprecation_warnings = {} + with self.assertLogs("transformers", level="WARNING") as cm: + output = tokenizer(seq_1, xpaths=xpaths_1, padding=padding_state, truncation=False) + self.assertNotEqual(len(output["input_ids"]), model_max_length) + self.assertNotEqual(len(output["xpath_tags_seq"]), model_max_length) + self.assertNotEqual(len(output["xpath_subs_seq"]), model_max_length) + self.assertEqual(len(cm.records), 1) + self.assertTrue( + cm.records[0].message.startswith( + "Token indices sequence length is longer than the specified maximum sequence length" + " for this model" + ) + ) + + tokenizer.deprecation_warnings = {} + with self.assertLogs("transformers", level="WARNING") as cm: + output = tokenizer([seq_1], xpaths=[xpaths_1], padding=padding_state, truncation=False) + self.assertNotEqual(len(output["input_ids"][0]), model_max_length) + self.assertNotEqual(len(output["xpath_tags_seq"][0]), model_max_length) + self.assertNotEqual(len(output["xpath_subs_seq"][0]), model_max_length) + self.assertEqual(len(cm.records), 1) + self.assertTrue( + cm.records[0].message.startswith( + "Token indices sequence length is longer than the specified maximum sequence length" + " for this model" + ) + ) + # Check the order of Sequence of input ids, overflowing tokens, xpath_tags_seq and xpath_subs_seq sequence with truncation + stride = 2 + information = tokenizer( + seq_0, + xpaths=xpaths_0, + max_length=total_length - 2, + add_special_tokens=False, + stride=stride, + truncation=True, + return_overflowing_tokens=True, + ) + + # Overflowing tokens are handled quite differently in slow and fast tokenizers + if isinstance(tokenizer, MarkupLMTokenizerFast): + truncated_sequence = information["input_ids"][0] + overflowing_tokens = information["input_ids"][1] + xpath_tags_seq = information["xpath_tags_seq"][0] + overflowing_xpath_tags_seq = information["xpath_tags_seq"][1] + self.assertEqual(len(information["input_ids"]), 2) + + self.assertEqual(len(truncated_sequence), total_length - 2) + self.assertEqual(truncated_sequence, sequence["input_ids"][:-2]) + + self.assertEqual(len(overflowing_tokens), 2 + stride) + self.assertEqual(overflowing_tokens, sequence["input_ids"][-(2 + stride) :]) + + self.assertEqual(xpath_tags_seq, sequence["xpath_tags_seq"][:-2]) + self.assertEqual(overflowing_xpath_tags_seq, sequence["xpath_tags_seq"][-(2 + stride) :]) + else: + truncated_sequence = information["input_ids"] + overflowing_tokens = information["overflowing_tokens"] + xpath_tags_seq = information["xpath_tags_seq"] + overflowing_xpath_tags_seq = information["overflowing_xpath_tags_seq"] + self.assertEqual(len(truncated_sequence), total_length - 2) + self.assertEqual(truncated_sequence, sequence["input_ids"][:-2]) + + self.assertEqual(len(overflowing_tokens), 2 + stride) + self.assertEqual(overflowing_tokens, sequence["input_ids"][-(2 + stride) :]) + self.assertEqual(xpath_tags_seq, sequence["xpath_tags_seq"][:-2]) + self.assertEqual(overflowing_xpath_tags_seq, sequence["xpath_tags_seq"][-(2 + stride) :]) + + @unittest.skip("MarkupLM tokenizer requires xpaths besides sequences.") + def test_pretokenized_inputs(self): + pass + + @unittest.skip("MarkupLM tokenizer always expects pretokenized inputs.") + def test_compare_pretokenized_inputs(self): + pass + + @unittest.skip("MarkupLM fast tokenizer does not support prepare_for_model") + def test_compare_prepare_for_model(self): + pass + + @slow + def test_only_label_first_subword(self): + nodes = ["hello", "niels"] + xpaths = ["/html/body/div/li[1]/div/span" for _ in range(len(nodes))] + node_labels = [0, 1] + + # test slow tokenizer + tokenizer_p = MarkupLMTokenizer.from_pretrained("microsoft/markuplm-base") + encoding = tokenizer_p(nodes, xpaths=xpaths, node_labels=node_labels) + self.assertListEqual(encoding.labels, [-100, 0, 1, -100, -100]) + + tokenizer_p = MarkupLMTokenizer.from_pretrained("microsoft/markuplm-base", only_label_first_subword=False) + encoding = tokenizer_p(nodes, xpaths=xpaths, node_labels=node_labels) + self.assertListEqual(encoding.labels, [-100, 0, 1, 1, -100]) + + # test fast tokenizer + tokenizer_r = MarkupLMTokenizerFast.from_pretrained("microsoft/markuplm-base") + encoding = tokenizer_r(nodes, xpaths=xpaths, node_labels=node_labels) + self.assertListEqual(encoding.labels, [-100, 0, 1, -100, -100]) + + tokenizer_r = MarkupLMTokenizerFast.from_pretrained("microsoft/markuplm-base", only_label_first_subword=False) + encoding = tokenizer_r(nodes, xpaths=xpaths, node_labels=node_labels) + self.assertListEqual(encoding.labels, [-100, 0, 1, 1, -100]) + + def test_markuplm_integration_test(self): + tokenizer_p = MarkupLMTokenizer.from_pretrained("microsoft/markuplm-base") + tokenizer_r = MarkupLMTokenizerFast.from_pretrained("microsoft/markuplm-base") + + # There are 3 cases: + # CASE 1: document image classification (training + inference), document image token classification (inference), + # in which case only nodes and normalized bounding xpaths are provided to the tokenizer + # CASE 2: document image token classification (training), + # in which case one also provides word labels to the tokenizer + # CASE 3: document image visual question answering (inference), + # in which case one also provides a question to the tokenizer + + # We need to test all 3 cases both on batched and non-batched inputs. + + # CASE 1: not batched + nodes, xpaths = self.get_nodes_and_xpaths() + + # fmt: off + expected_results = {'input_ids': [0, 42891, 8331, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'xpath_tags_seq': [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], 'xpath_subs_seq': [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} + # fmt: on + + encoding_p = tokenizer_p(nodes, xpaths=xpaths, padding="max_length", max_length=20) + encoding_r = tokenizer_r(nodes, xpaths=xpaths, padding="max_length", max_length=20) + self.assertDictEqual(dict(encoding_p), expected_results) + self.assertDictEqual(dict(encoding_r), expected_results) + + # CASE 1: batched + nodes, xpaths = self.get_nodes_and_xpaths_batch() + + # fmt: off + expected_results = {'input_ids': [[0, 42891, 232, 12364, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 42891, 127, 766, 16, 22401, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'xpath_tags_seq': [[[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]]], 'xpath_subs_seq': [[[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} + # fmt: on + + encoding_p = tokenizer_p(nodes, xpaths=xpaths, padding="max_length", max_length=20) + encoding_r = tokenizer_r(nodes, xpaths=xpaths, padding="max_length", max_length=20) + self.assertDictEqual(dict(encoding_p), expected_results) + self.assertDictEqual(dict(encoding_r), expected_results) + + # CASE 2: not batched + nodes, xpaths = self.get_nodes_and_xpaths() + node_labels = [1, 2, 3] + + # fmt: off + expected_results = {'input_ids': [0, 42891, 8331, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 'xpath_tags_seq': [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], 'xpath_subs_seq': [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'labels': [-100, 1, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], 'attention_mask': [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} + # fmt: on + + encoding_p = tokenizer_p(nodes, xpaths=xpaths, node_labels=node_labels, padding="max_length", max_length=20) + encoding_r = tokenizer_r(nodes, xpaths=xpaths, node_labels=node_labels, padding="max_length", max_length=20) + self.assertDictEqual(dict(encoding_p), expected_results) + self.assertDictEqual(dict(encoding_r), expected_results) + + # CASE 2: batched + nodes, xpaths = self.get_nodes_and_xpaths_batch() + node_labels = [[1, 2, 3], [2, 46, 17, 22, 3]] + + # fmt: off + expected_results = {'input_ids': [[0, 42891, 232, 12364, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 42891, 127, 766, 16, 22401, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'xpath_tags_seq': [[[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]]], 'xpath_subs_seq': [[[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'labels': [[-100, 1, -100, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100], [-100, 2, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100]], 'attention_mask': [[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} + # fmt: on + + encoding_p = tokenizer_p(nodes, xpaths=xpaths, node_labels=node_labels, padding="max_length", max_length=20) + encoding_r = tokenizer_r(nodes, xpaths=xpaths, node_labels=node_labels, padding="max_length", max_length=20) + self.assertDictEqual(dict(encoding_p), expected_results) + self.assertDictEqual(dict(encoding_r), expected_results) + + # CASE 3: not batched + question, nodes, xpaths = self.get_question_nodes_and_xpaths() + + # fmt: off + expected_results = {'input_ids': [0, 12196, 18, 39, 766, 116, 2, 42891, 232, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], + 'xpath_tags_seq': [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], 'xpath_subs_seq': [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} + # fmt: on + + encoding_p = tokenizer_p(question, nodes, xpaths, padding="max_length", max_length=20) + encoding_r = tokenizer_r(question, nodes, xpaths, padding="max_length", max_length=20) + self.assertDictEqual(dict(encoding_p), expected_results) + self.assertDictEqual(dict(encoding_r), expected_results) + + # CASE 3: batched + questions, nodes, xpaths = self.get_question_nodes_and_xpaths_batch() + + # fmt: off + expected_results = {'input_ids': [[0, 12196, 18, 39, 766, 116, 2, 42891, 232, 12364, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 9178, 16, 37, 373, 116, 2, 42891, 127, 766, 16, 22401, 2, 1, 1, 1, 1, 1, 1, 1]], + 'xpath_tags_seq': [[[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]], [[216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [109, 25, 50, 120, 50, 178, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216], [216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216]]], + 'xpath_subs_seq': [[[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]], [[1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [109, 25, 50, 120, 50, 178, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001], [1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001, 1001]]], + 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]]} + # fmt: on + + encoding_p = tokenizer_p(questions, nodes, xpaths, padding="max_length", max_length=20) + encoding_r = tokenizer_r(questions, nodes, xpaths, padding="max_length", max_length=20) + self.assertDictEqual(dict(encoding_p), expected_results) + self.assertDictEqual(dict(encoding_r), expected_results) + + @unittest.skip("Doesn't support another framework than PyTorch") + def test_np_encode_plus_sent_to_model(self): + pass + + def test_padding_warning_message_fast_tokenizer(self): + if not self.test_rust_tokenizer: + return + + nodes, xpaths = self.get_nodes_and_xpaths() + + tokenizer_fast = self.get_rust_tokenizer() + # check correct behaviour if no pad_token_id exists and add it eventually + self._check_no_pad_token_padding(tokenizer_fast, nodes) + + encoding_fast = tokenizer_fast(nodes, xpaths=xpaths) + + with self.assertLogs("transformers", level="WARNING") as cm: + tokenizer_fast.pad(encoding_fast) + self.assertEqual(len(cm.records), 1) + self.assertIn( + "Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to" + " encode the text followed by a call to the `pad` method to get a padded encoding.", + cm.records[0].message, + ) + + if not self.test_slow_tokenizer: + return + + tokenizer_slow = self.get_tokenizer() + # check correct behaviour if no pad_token_id exists and add it eventually + self._check_no_pad_token_padding(tokenizer_slow, nodes) + + encoding_slow = tokenizer_slow(nodes, xpaths=xpaths) + + with self.assertLogs(level="WARNING") as cm: + # We want to assert there are no warnings, but the 'assertLogs' method does not support that. + # Therefore, we are adding a dummy warning, and then we will assert it is the only warning. + logger.warning("Dummy warning") + tokenizer_slow.pad(encoding_slow) + self.assertEqual(len(cm.records), 1) + self.assertIn( + "Dummy warning", + cm.records[0].message, + ) diff --git a/utils/documentation_tests.txt b/utils/documentation_tests.txt index eb1570d6c3145a..06b042c1fc0fb1 100644 --- a/utils/documentation_tests.txt +++ b/utils/documentation_tests.txt @@ -3,6 +3,7 @@ docs/source/es/quicktour.mdx docs/source/en/pipeline_tutorial.mdx docs/source/en/autoclass_tutorial.mdx docs/source/en/task_summary.mdx +docs/source/en/model_doc/markuplm.mdx docs/source/en/model_doc/speech_to_text.mdx docs/source/en/model_doc/t5.mdx docs/source/en/model_doc/t5v1.1.mdx @@ -51,6 +52,7 @@ src/transformers/models/longformer/modeling_longformer.py src/transformers/models/longformer/modeling_tf_longformer.py src/transformers/models/longt5/modeling_longt5.py src/transformers/models/marian/modeling_marian.py +src/transformers/models/markuplm/modeling_markuplm.py src/transformers/models/mbart/modeling_mbart.py src/transformers/models/mobilebert/modeling_mobilebert.py src/transformers/models/mobilebert/modeling_tf_mobilebert.py From 4fd32a1f499e45f009c2c0dea4d81c321cba7e02 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Fri, 30 Sep 2022 13:45:56 +0200 Subject: [PATCH 425/539] Catch `HFValidationError` in `TrainingSummary` (#19252) * Catch HfValidationError in TrainingSummary Co-authored-by: ydshieh --- src/transformers/modelcard.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/transformers/modelcard.py b/src/transformers/modelcard.py index 6743c5624eaf3c..cf8971d3d66b77 100644 --- a/src/transformers/modelcard.py +++ b/src/transformers/modelcard.py @@ -26,6 +26,7 @@ import requests import yaml from huggingface_hub import model_info +from huggingface_hub.utils import HFValidationError from . import __version__ from .models.auto.modeling_auto import ( @@ -378,7 +379,7 @@ def __post_init__(self): for tag in info.tags: if tag.startswith("license:"): self.license = tag[8:] - except requests.exceptions.HTTPError: + except (requests.exceptions.HTTPError, HFValidationError): pass def create_model_index(self, metric_mapping): From 368b649af6f66ce10e7aeeb3bfbaa9bb9f5be315 Mon Sep 17 00:00:00 2001 From: Matt Date: Fri, 30 Sep 2022 14:16:25 +0100 Subject: [PATCH 426/539] Rebase ESM PR and update all file formats (#19055) * Rebase ESM PR and update all file formats * Fix test relative imports * Add __init__.py to the test dir * Disable gradient checkpointing * Remove references to TFESM... FOR NOW >:| * Remove completed TODOs from tests * Convert docstrings to mdx, fix-copies from BERT * fix-copies for the README and index * Update ESM's __init__.py to the modern format * Add to _toctree.yml * Ensure we correctly copy the pad_token_id from the original ESM model * Ensure we correctly copy the pad_token_id from the original ESM model * Tiny grammar nitpicks * Make the layer norm after embeddings an optional flag * Make the layer norm after embeddings an optional flag * Update the conversion script to handle other model classes * Remove token_type_ids entirely, fix attention_masking and add checks to convert_esm.py * Break the copied from link from BertModel.forward to remove token_type_ids * Remove debug array saves * Begin ESM-2 porting * Add a hacky workaround for the precision issue in original repo * Code cleanup * Remove unused checkpoint conversion code * Remove unused checkpoint conversion code * Fix copyright notices * Get rid of all references to the TF weights conversion * Remove token_type_ids from the tests * Fix test code * Update src/transformers/__init__.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/__init__.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update README.md Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Add credit * Remove _ args and __ kwargs in rotary embedding * Assertively remove asserts * Replace einsum with torch.outer() * Fix docstring formatting * Remove assertions in tokenization * Add paper citation to ESMModel docstring * Move vocab list to single line * Remove ESMLayer from init * Add Facebook copyrights * Clean up RotaryEmbedding docstring * Fix docstring formatting * Fix docstring for config object * Add explanation for new config methods * make fix-copies * Rename all the ESM- classes to Esm- * Update conversion script to allow pushing to hub * Update tests to point at my repo for now * Set config properly for tests * Remove the gross hack that forced loss of precision in inv_freq and instead copy the data from the model being converted * make fixup * Update expected values for slow tests * make fixup * Remove EsmForCausalLM for now * Remove EsmForCausalLM for now * Fix padding idx test * Updated README and docs with ESM-1b and ESM-2 separately (#19221) * Updated README and docs with ESM-1b and ESM-2 separately * Update READMEs, longer entry with 3 citations * make fix-copies Co-authored-by: Your Name Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: Tom Sercu Co-authored-by: Your Name --- README.md | 1 + README_ko.md | 1 + README_zh-hans.md | 1 + README_zh-hant.md | 1 + docs/source/en/_toctree.yml | 2 + docs/source/en/index.mdx | 2 + docs/source/en/model_doc/esm.mdx | 109 ++ src/transformers/__init__.py | 20 + src/transformers/models/__init__.py | 1 + .../models/auto/configuration_auto.py | 3 + src/transformers/models/auto/modeling_auto.py | 4 + src/transformers/models/esm/__init__.py | 67 + .../models/esm/configuration_esm.py | 142 ++ src/transformers/models/esm/convert_esm.py | 264 ++++ src/transformers/models/esm/modeling_esm.py | 1241 +++++++++++++++++ .../models/esm/tokenization_esm.py | 106 ++ src/transformers/utils/dummy_pt_objects.py | 38 + tests/models/esm/__init__.py | 0 tests/models/esm/test_modeling_esm.py | 293 ++++ tests/models/esm/test_tokenization_esm.py | 91 ++ 20 files changed, 2387 insertions(+) create mode 100644 docs/source/en/model_doc/esm.mdx create mode 100644 src/transformers/models/esm/__init__.py create mode 100644 src/transformers/models/esm/configuration_esm.py create mode 100644 src/transformers/models/esm/convert_esm.py create mode 100755 src/transformers/models/esm/modeling_esm.py create mode 100644 src/transformers/models/esm/tokenization_esm.py create mode 100644 tests/models/esm/__init__.py create mode 100644 tests/models/esm/test_modeling_esm.py create mode 100644 tests/models/esm/test_tokenization_esm.py diff --git a/README.md b/README.md index ea026159803b7d..1d10617dbacf7d 100644 --- a/README.md +++ b/README.md @@ -300,6 +300,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. 1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. 1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu. +1. **[ESM](https://huggingface.co/docs/transformers/main/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives. 1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab. 1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. 1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon. diff --git a/README_ko.md b/README_ko.md index e7a0d9d2960470..ca21d265f2f739 100644 --- a/README_ko.md +++ b/README_ko.md @@ -250,6 +250,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. 1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. 1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu. +1. **[ESM](https://huggingface.co/docs/transformers/main/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives. 1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab. 1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. 1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon. diff --git a/README_zh-hans.md b/README_zh-hans.md index f3f1a5474c833c..af39a369679dcf 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -274,6 +274,7 @@ conda install -c huggingface transformers 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (来自 Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning 发布。 1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (来自 Google Research) 伴随论文 [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) 由 Sascha Rothe, Shashi Narayan, Aliaksei Severyn 发布。 1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (来自 Baidu) 伴随论文 [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu 发布。 +1. **[ESM](https://huggingface.co/docs/transformers/main/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives. 1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (来自 CNRS) 伴随论文 [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) 由 Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab 发布。 1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (来自 Facebook AI) 伴随论文 [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) 由 Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela 发布。 1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (来自 Google Research) 伴随论文 [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) 由 James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index 43e8a05372cd07..3a3122af87b7a1 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -286,6 +286,7 @@ conda install -c huggingface transformers 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. 1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. 1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu. +1. **[ESM](https://huggingface.co/docs/transformers/main/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives. 1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab. 1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. 1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon. diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 644778e155c978..6e0f764465c73e 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -241,6 +241,8 @@ title: Encoder Decoder Models - local: model_doc/ernie title: ERNIE + - local: model_doc/esm + title: ESM - local: model_doc/flaubert title: FlauBERT - local: model_doc/fnet diff --git a/docs/source/en/index.mdx b/docs/source/en/index.mdx index 652c5bc77b8ee9..8b5defb96e725e 100644 --- a/docs/source/en/index.mdx +++ b/docs/source/en/index.mdx @@ -90,6 +90,7 @@ The documentation is organized into five sections: 1. **[ELECTRA](model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. 1. **[EncoderDecoder](model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. 1. **[ERNIE](model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu. +1. **[ESM](model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives. 1. **[FlauBERT](model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab. 1. **[FLAVA](model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. 1. **[FNet](model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon. @@ -239,6 +240,7 @@ Flax), PyTorch, and/or TensorFlow. | ELECTRA | ✅ | ✅ | ✅ | ✅ | ✅ | | Encoder decoder | ❌ | ❌ | ✅ | ✅ | ✅ | | ERNIE | ❌ | ❌ | ✅ | ❌ | ❌ | +| ESM | ✅ | ❌ | ✅ | ❌ | ❌ | | FairSeq Machine-Translation | ✅ | ❌ | ✅ | ❌ | ❌ | | FlauBERT | ✅ | ❌ | ✅ | ✅ | ❌ | | FLAVA | ❌ | ❌ | ✅ | ❌ | ❌ | diff --git a/docs/source/en/model_doc/esm.mdx b/docs/source/en/model_doc/esm.mdx new file mode 100644 index 00000000000000..d2fc949781bc22 --- /dev/null +++ b/docs/source/en/model_doc/esm.mdx @@ -0,0 +1,109 @@ + + +# ESM + +## Overview +This page provides code and pre-trained weights for Transformer protein language models from Meta AI's Fundamental +AI Research Team, providing the state-of-the-art ESM-2, and the previously released ESM-1b and ESM-1v. Transformer +protein language models were introduced in the paper [Biological structure and function emerge from scaling +unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by +Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, +C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. +The first version of this paper was [preprinted in 2019](https://www.biorxiv.org/content/10.1101/622803v1?versioned=true). + +ESM-2 outperforms all tested single-sequence protein language models across a range of structure prediction tasks, +and enables atomic resolution structure prediction. +It was released with the paper [Language models of protein sequences at the scale of evolution enable accurate +structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, +Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido and Alexander Rives. + + +The abstract from +"Biological structure and function emerge from scaling unsupervised learning to 250 +million protein sequences" is + + +*In the field of artificial intelligence, a combination of scale in data and model capacity enabled by unsupervised +learning has led to major advances in representation learning and statistical generation. In the life sciences, the +anticipated growth of sequencing promises unprecedented data on natural sequence diversity. Protein language modeling +at the scale of evolution is a logical step toward predictive and generative artificial intelligence for biology. To +this end, we use unsupervised learning to train a deep contextual language model on 86 billion amino acids across 250 +million protein sequences spanning evolutionary diversity. The resulting model contains information about biological +properties in its representations. The representations are learned from sequence data alone. The learned representation +space has a multiscale organization reflecting structure from the level of biochemical properties of amino acids to +remote homology of proteins. Information about secondary and tertiary structure is encoded in the representations and +can be identified by linear projections. Representation learning produces features that generalize across a range of +applications, enabling state-of-the-art supervised prediction of mutational effect and secondary structure and +improving state-of-the-art features for long-range contact prediction.* + + +The abstract from +"Language models of protein sequences at the scale of evolution enable accurate structure prediction" is + +*Large language models have recently been shown to develop emergent capabilities with scale, going beyond +simple pattern matching to perform higher level reasoning and generate lifelike images and text. While +language models trained on protein sequences have been studied at a smaller scale, little is known about +what they learn about biology as they are scaled up. In this work we train models up to 15 billion parameters, +the largest language models of proteins to be evaluated to date. We find that as models are scaled they learn +information enabling the prediction of the three-dimensional structure of a protein at the resolution of +individual atoms. We present ESMFold for high accuracy end-to-end atomic level structure prediction directly +from the individual sequence of a protein. ESMFold has similar accuracy to AlphaFold2 and RoseTTAFold for +sequences with low perplexity that are well understood by the language model. ESMFold inference is an +order of magnitude faster than AlphaFold2, enabling exploration of the structural space of metagenomic +proteins in practical timescales.* + + + + +Tips: + +- ESM models are trained with a masked language modeling (MLM) objective. + +The original code can be found [here](https://github.com/facebookresearch/esm) and was +was developed by the Fundamental AI Research team at Meta AI. +This model was contributed to huggingface by [jasonliu](https://huggingface.co/jasonliu) +and [Matt](https://huggingface.co/Rocketknight1). + +## EsmConfig + +[[autodoc]] EsmConfig + - all + +## EsmTokenizer + +[[autodoc]] EsmTokenizer + - build_inputs_with_special_tokens + - get_special_tokens_mask + - create_token_type_ids_from_sequences + - save_vocabulary + + +## EsmModel + +[[autodoc]] EsmModel + - forward + +## EsmForMaskedLM + +[[autodoc]] EsmForMaskedLM + - forward + +## EsmForSequenceClassification + +[[autodoc]] EsmForSequenceClassification + - forward + +## EsmForTokenClassification + +[[autodoc]] EsmForTokenClassification + - forward diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 6478bcd7e5b544..93c3118f691909 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -210,6 +210,7 @@ "ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", ], + "models.esm": ["ESM_PRETRAINED_CONFIG_ARCHIVE_MAP", "EsmConfig", "EsmTokenizer"], "models.flaubert": ["FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "FlaubertConfig", "FlaubertTokenizer"], "models.flava": [ "FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP", @@ -1220,6 +1221,16 @@ "ErniePreTrainedModel", ] ) + _import_structure["models.esm"].extend( + [ + "ESM_PRETRAINED_MODEL_ARCHIVE_LIST", + "EsmForMaskedLM", + "EsmForSequenceClassification", + "EsmForTokenClassification", + "EsmModel", + "EsmPreTrainedModel", + ] + ) _import_structure["models.flaubert"].extend( [ "FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST", @@ -3158,6 +3169,7 @@ from .models.electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraTokenizer from .models.encoder_decoder import EncoderDecoderConfig from .models.ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig + from .models.esm import ESM_PRETRAINED_CONFIG_ARCHIVE_MAP, EsmConfig, EsmTokenizer from .models.flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig, FlaubertTokenizer from .models.flava import ( FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP, @@ -4010,6 +4022,14 @@ ErnieModel, ErniePreTrainedModel, ) + from .models.esm import ( + ESM_PRETRAINED_MODEL_ARCHIVE_LIST, + EsmForMaskedLM, + EsmForSequenceClassification, + EsmForTokenClassification, + EsmModel, + EsmPreTrainedModel, + ) from .models.flaubert import ( FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertForMultipleChoice, diff --git a/src/transformers/models/__init__.py b/src/transformers/models/__init__.py index 261d4c03e2369f..e30c79480e09d3 100644 --- a/src/transformers/models/__init__.py +++ b/src/transformers/models/__init__.py @@ -60,6 +60,7 @@ electra, encoder_decoder, ernie, + esm, flaubert, flava, fnet, diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index 781641b74edf92..bf8c3619fb7a42 100644 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -64,6 +64,7 @@ ("electra", "ElectraConfig"), ("encoder-decoder", "EncoderDecoderConfig"), ("ernie", "ErnieConfig"), + ("esm", "EsmConfig"), ("flaubert", "FlaubertConfig"), ("flava", "FlavaConfig"), ("fnet", "FNetConfig"), @@ -197,6 +198,7 @@ ("dpt", "DPT_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("electra", "ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("ernie", "ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("esm", "ESM_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("flaubert", "FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("flava", "FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("fnet", "FNET_PRETRAINED_CONFIG_ARCHIVE_MAP"), @@ -331,6 +333,7 @@ ("electra", "ELECTRA"), ("encoder-decoder", "Encoder decoder"), ("ernie", "ERNIE"), + ("esm", "ESM"), ("flaubert", "FlauBERT"), ("flava", "FLAVA"), ("fnet", "FNet"), diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index d703c5b22a6c0f..dba5af0191d16a 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -63,6 +63,7 @@ ("dpt", "DPTModel"), ("electra", "ElectraModel"), ("ernie", "ErnieModel"), + ("esm", "EsmModel"), ("flaubert", "FlaubertModel"), ("flava", "FlavaModel"), ("fnet", "FNetModel"), @@ -231,6 +232,7 @@ ("electra", "ElectraForMaskedLM"), ("encoder-decoder", "EncoderDecoderModel"), ("ernie", "ErnieForMaskedLM"), + ("esm", "EsmForMaskedLM"), ("flaubert", "FlaubertWithLMHeadModel"), ("fnet", "FNetForMaskedLM"), ("fsmt", "FSMTForConditionalGeneration"), @@ -519,6 +521,7 @@ ("distilbert", "DistilBertForSequenceClassification"), ("electra", "ElectraForSequenceClassification"), ("ernie", "ErnieForSequenceClassification"), + ("esm", "EsmForSequenceClassification"), ("flaubert", "FlaubertForSequenceClassification"), ("fnet", "FNetForSequenceClassification"), ("funnel", "FunnelForSequenceClassification"), @@ -648,6 +651,7 @@ ("distilbert", "DistilBertForTokenClassification"), ("electra", "ElectraForTokenClassification"), ("ernie", "ErnieForTokenClassification"), + ("esm", "EsmForTokenClassification"), ("flaubert", "FlaubertForTokenClassification"), ("fnet", "FNetForTokenClassification"), ("funnel", "FunnelForTokenClassification"), diff --git a/src/transformers/models/esm/__init__.py b/src/transformers/models/esm/__init__.py new file mode 100644 index 00000000000000..9d1d0687726e23 --- /dev/null +++ b/src/transformers/models/esm/__init__.py @@ -0,0 +1,67 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +# Copyright 2022 Facebook and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available + + +_import_structure = { + "configuration_esm": ["ESM_PRETRAINED_CONFIG_ARCHIVE_MAP", "EsmConfig"], + "tokenization_esm": ["EsmTokenizer"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_esm"] = [ + "ESM_PRETRAINED_MODEL_ARCHIVE_LIST", + "EsmForMaskedLM", + "EsmForSequenceClassification", + "EsmForTokenClassification", + "EsmModel", + "EsmPreTrainedModel", + ] + + +if TYPE_CHECKING: + from .configuration_esm import ESM_PRETRAINED_CONFIG_ARCHIVE_MAP, EsmConfig + from .tokenization_esm import EsmTokenizer + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_esm import ( + ESM_PRETRAINED_MODEL_ARCHIVE_LIST, + EsmForMaskedLM, + EsmForSequenceClassification, + EsmForTokenClassification, + EsmModel, + EsmPreTrainedModel, + ) + + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) diff --git a/src/transformers/models/esm/configuration_esm.py b/src/transformers/models/esm/configuration_esm.py new file mode 100644 index 00000000000000..6b8f241ffb581f --- /dev/null +++ b/src/transformers/models/esm/configuration_esm.py @@ -0,0 +1,142 @@ +# coding=utf-8 +# Copyright 2021 Facebook and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" ESM model configuration""" + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + +ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "facebook/esm1b": "https://huggingface.co/facebook/esm1b/resolve/main/config.json", + # See all ESM models at https://huggingface.co/models?filter=esm +} + + +class EsmConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`ESMModel`]. It is used to instantiate a ESM model + according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the ESM + [esm-base-uncased](https://huggingface.co/esm-base-uncased) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*): + Vocabulary size of the ESM model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`ESMModel`]. + mask_token_id (`int`, *optional*): + The index of the mask token in the vocabulary. This must be included in the config because of the + "mask-dropout" scaling trick, which will scale the inputs depending on the number of masked tokens. + pad_token_id (`int`, *optional*): + The index of the padding token in the vocabulary. This must be included in the config because certain parts + of the ESM code use this instead of the attention mask. + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. + hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention probabilities. + max_position_embeddings (`int`, *optional*, defaults to 1026): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the layer normalization layers. + position_embedding_type (`str`, *optional*, defaults to `"absolute"`): + Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query", "rotary"`. + For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to + [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). + For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models + with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + classifier_dropout (`float`, *optional*): + The dropout ratio for the classification head. + emb_layer_norm_before (`bool`, *optional*): + Whether to apply layer normalization after embeddings but before the main stem of the network. + token_dropout (`bool`, defaults to `False`): + When this is enabled, masked tokens are treated as if they had been dropped out by input dropout. + + Examples: + + ```python + >>> from transformers import EsmModel, EsmConfig + + >>> # Initializing a ESM esm-base-uncased style configuration >>> configuration = EsmConfig() + + >>> # Initializing a model from the configuration >>> model = ESMModel(configuration) + + >>> # Accessing the model configuration >>> configuration = model.config + ```""" + model_type = "esm" + + def __init__( + self, + vocab_size=None, + mask_token_id=None, + pad_token_id=None, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=1026, + initializer_range=0.02, + layer_norm_eps=1e-12, + position_embedding_type="absolute", + use_cache=True, + classifier_dropout=None, + emb_layer_norm_before=None, + token_dropout=False, + **kwargs + ): + super().__init__(pad_token_id=pad_token_id, **kwargs) + + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.position_embedding_type = position_embedding_type + self.use_cache = use_cache + self.classifier_dropout = classifier_dropout + self.emb_layer_norm_before = emb_layer_norm_before + self.token_dropout = token_dropout + self.mask_token_id = mask_token_id + self.pad_token_id = pad_token_id diff --git a/src/transformers/models/esm/convert_esm.py b/src/transformers/models/esm/convert_esm.py new file mode 100644 index 00000000000000..20d2586a8786a5 --- /dev/null +++ b/src/transformers/models/esm/convert_esm.py @@ -0,0 +1,264 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Convert ESM checkpoint.""" + + +import argparse +import pathlib +from pathlib import Path +from tempfile import TemporaryDirectory + +import torch + +import esm as esm_module +from transformers.models.esm.configuration_esm import EsmConfig +from transformers.models.esm.modeling_esm import ( + EsmForMaskedLM, + EsmForSequenceClassification, + EsmIntermediate, + EsmLayer, + EsmOutput, + EsmSelfAttention, + EsmSelfOutput, +) +from transformers.models.esm.tokenization_esm import EsmTokenizer +from transformers.utils import logging + + +logging.set_verbosity_info() +logger = logging.get_logger(__name__) + +SAMPLE_DATA = [ + ("protein1", "MKTVRQERLKSIVRILERSKEPVSGAQLAEELSVSRQVIVQDIAYLRSLGYNIVATPRGYVLAGG"), + ("protein2", "MKTVRQERLKSIVRILERSKEPVSGAQLAEELSVSRQVIVQDIAYLRSLGYNIVATPRGYVLA"), + ("protein3", "MKTVRQERLKSIRILERSKEPVSGAQLAEELSSRQVIVQDIAYLRSLGYNVATPRGYVLAGG"), + ("protein4", "MKTVRQERLKSIRILERSKEPVSGAQLAEELSSRQVIVQDIAYLRSLGYNVATPRGYVLA"), +] + +MODEL_MAPPING = { + "esm1b_t33_650M_UR50S": esm_module.pretrained.esm1b_t33_650M_UR50S, + "esm1v_t33_650M_UR90S_1": esm_module.pretrained.esm1v_t33_650M_UR90S_1, + "esm1v_t33_650M_UR90S_2": esm_module.pretrained.esm1v_t33_650M_UR90S_2, + "esm1v_t33_650M_UR90S_3": esm_module.pretrained.esm1v_t33_650M_UR90S_3, + "esm1v_t33_650M_UR90S_4": esm_module.pretrained.esm1v_t33_650M_UR90S_4, + "esm1v_t33_650M_UR90S_5": esm_module.pretrained.esm1v_t33_650M_UR90S_5, + "esm2_t48_15B_UR50D": esm_module.pretrained.esm2_t48_15B_UR50D, + "esm2_t36_3B_UR50D": esm_module.pretrained.esm2_t36_3B_UR50D, + "esm2_t33_650M_UR50D": esm_module.pretrained.esm2_t33_650M_UR50D, + "esm2_t30_150M_UR50D": esm_module.pretrained.esm2_t30_150M_UR50D, + "esm2_t12_35M_UR50D": esm_module.pretrained.esm2_t12_35M_UR50D, + "esm2_t6_8M_UR50D": esm_module.pretrained.esm2_t6_8M_UR50D, +} + + +def convert_esm_checkpoint_to_pytorch( + model: str, pytorch_dump_folder_path: str, classification_head: bool, push_to_repo: str, auth_token: str +): + """ + Copy/paste/tweak esm's weights to our BERT structure. + """ + esm, alphabet = MODEL_MAPPING[model]() + esm.eval() # disable dropout + esm_sent_encoder = esm + if hasattr(esm, "args"): + # Indicates an ESM-1b or ESM-1v model + embed_dim = esm.args.embed_dim + num_layers = esm.args.layers + num_attention_heads = esm.args.attention_heads + intermediate_size = esm.args.ffn_embed_dim + token_dropout = esm.args.token_dropout + emb_layer_norm_before = True if esm.emb_layer_norm_before else False + position_embedding_type = "absolute" + else: + # Indicates an ESM-2 model + embed_dim = esm.embed_dim + num_layers = esm.num_layers + num_attention_heads = esm.attention_heads + intermediate_size = 4 * embed_dim # This is hardcoded in ESM-2 + token_dropout = esm.token_dropout + emb_layer_norm_before = False # This code path does not exist in ESM-2 + position_embedding_type = "rotary" + + config = EsmConfig( + vocab_size=esm_sent_encoder.embed_tokens.num_embeddings, + mask_token_id=alphabet.mask_idx, + hidden_size=embed_dim, + num_hidden_layers=num_layers, + num_attention_heads=num_attention_heads, + intermediate_size=intermediate_size, + max_position_embeddings=1026, + layer_norm_eps=1e-5, # PyTorch default used in fairseq + attention_probs_dropout_prob=0.0, + hidden_dropout_prob=0.0, + pad_token_id=esm.padding_idx, + emb_layer_norm_before=emb_layer_norm_before, + token_dropout=token_dropout, + position_embedding_type=position_embedding_type, + ) + if classification_head: + config.num_labels = esm.classification_heads["mnli"].out_proj.weight.shape[0] + print("Our BERT config:", config) + + model = EsmForSequenceClassification(config) if classification_head else EsmForMaskedLM(config) + model.eval() + + # Now let's copy all the weights. + # Embeddings + model.esm.embeddings.word_embeddings.weight = esm_sent_encoder.embed_tokens.weight + if position_embedding_type == "absolute": + model.esm.embeddings.position_embeddings.weight = esm_sent_encoder.embed_positions.weight + + if config.emb_layer_norm_before: + model.esm.embeddings.layer_norm.weight = esm_sent_encoder.emb_layer_norm_before.weight + model.esm.embeddings.layer_norm.bias = esm_sent_encoder.emb_layer_norm_before.bias + + model.esm.encoder.emb_layer_norm_after.weight = esm_sent_encoder.emb_layer_norm_after.weight + model.esm.encoder.emb_layer_norm_after.bias = esm_sent_encoder.emb_layer_norm_after.bias + + for i in range(config.num_hidden_layers): + # Encoder: start of layer + layer: EsmLayer = model.esm.encoder.layer[i] + # esm_layer: TransformerSentenceEncoderLayer = esm_sent_encoder.layers[i] + esm_layer = esm_sent_encoder.layers[i] + + # self attention + self_attn: EsmSelfAttention = layer.attention.self + assert ( + esm_layer.self_attn.k_proj.weight.data.shape + == esm_layer.self_attn.q_proj.weight.data.shape + == esm_layer.self_attn.v_proj.weight.data.shape + == torch.Size((config.hidden_size, config.hidden_size)) + ) + + self_attn.query.weight.data = esm_layer.self_attn.q_proj.weight + self_attn.query.bias.data = esm_layer.self_attn.q_proj.bias + self_attn.key.weight.data = esm_layer.self_attn.k_proj.weight + self_attn.key.bias.data = esm_layer.self_attn.k_proj.bias + self_attn.value.weight.data = esm_layer.self_attn.v_proj.weight + self_attn.value.bias.data = esm_layer.self_attn.v_proj.bias + + if hasattr(esm_layer.self_attn, "rot_emb"): + # Matt: Although inv_freq is not a trainable weight, it is computed at model init and cached. + # During the training of ESM-2 the model was converted to float16 precision, which also converts + # the inv_freq tensor, and the loss of precision remains even if the model is loaded later as float32. + # If we recompute inv_freq without this loss of precision then we will get subtly different rotary + # embeddings, which are enough to cause significant discrepancies in model outputs. To avoid this, + # we make sure the new model copies the data from the old inv_freq. + self_attn.rotary_embeddings.inv_freq.data = esm_layer.self_attn.rot_emb.inv_freq + + # LayerNorm changes for pre-activation + layer.attention.LayerNorm.weight = esm_layer.self_attn_layer_norm.weight + layer.attention.LayerNorm.bias = esm_layer.self_attn_layer_norm.bias + layer.LayerNorm.weight = esm_layer.final_layer_norm.weight + layer.LayerNorm.bias = esm_layer.final_layer_norm.bias + + # self-attention output + self_output: EsmSelfOutput = layer.attention.output + assert self_output.dense.weight.shape == esm_layer.self_attn.out_proj.weight.shape + self_output.dense.weight = esm_layer.self_attn.out_proj.weight + self_output.dense.bias = esm_layer.self_attn.out_proj.bias + + # intermediate + intermediate: EsmIntermediate = layer.intermediate + assert intermediate.dense.weight.shape == esm_layer.fc1.weight.shape + intermediate.dense.weight = esm_layer.fc1.weight + intermediate.dense.bias = esm_layer.fc1.bias + + # output + bert_output: EsmOutput = layer.output + assert bert_output.dense.weight.shape == esm_layer.fc2.weight.shape + bert_output.dense.weight = esm_layer.fc2.weight + bert_output.dense.bias = esm_layer.fc2.bias + # end of layer + + if classification_head: + model.classifier.dense.weight = esm.esm.classification_heads["mnli"].dense.weight + model.classifier.dense.bias = esm.classification_heads["mnli"].dense.bias + model.classifier.out_proj.weight = esm.classification_heads["mnli"].out_proj.weight + model.classifier.out_proj.bias = esm.classification_heads["mnli"].out_proj.bias + else: + # LM Head + model.lm_head.dense.weight = esm.lm_head.dense.weight + model.lm_head.dense.bias = esm.lm_head.dense.bias + model.lm_head.layer_norm.weight = esm.lm_head.layer_norm.weight + model.lm_head.layer_norm.bias = esm.lm_head.layer_norm.bias + model.lm_head.decoder.weight = esm.lm_head.weight + model.lm_head.decoder.bias = esm.lm_head.bias + + # Let's check that we get the same results. + batch_converter = alphabet.get_batch_converter() + + # Prepare data (first 2 sequences from ESMStructuralSplitDataset superfamily / 4) + + batch_labels, batch_strs, batch_tokens = batch_converter(SAMPLE_DATA) + + # Prepare tokenizer and make sure it matches + with TemporaryDirectory() as tempdir: + vocab = "\n".join(alphabet.all_toks) + vocab_file = Path(tempdir) / "vocab.txt" + vocab_file.write_text(vocab) + hf_tokenizer = EsmTokenizer(vocab_file=str(vocab_file)) + + hf_tokens = hf_tokenizer([row[1] for row in SAMPLE_DATA], return_tensors="pt", padding=True) + success = torch.all(hf_tokens["input_ids"] == batch_tokens) + print("Do both models tokenizers output the same tokens?", "🔥" if success else "💩") + if not success: + raise Exception("Tokenization does not match!") + + with torch.no_grad(): + our_output = model(**hf_tokens, output_hidden_states=True) + our_output = our_output["logits"] + if classification_head: + their_output = esm.model.classification_heads["mnli"](esm.extract_features(batch_tokens)) + else: + their_output = esm(batch_tokens, repr_layers=list(range(999))) + their_output = their_output["logits"] + print(our_output.shape, their_output.shape) + max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item() + print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-5 + success = torch.allclose(our_output, their_output, atol=3e-4) + print("Do both models output the same tensors?", "🔥" if success else "💩") + + if not success: + raise Exception("Something went wRoNg") + + pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True) + print(f"Saving model to {pytorch_dump_folder_path}") + model.save_pretrained(pytorch_dump_folder_path) + + print(f"Saving tokenizer to {pytorch_dump_folder_path}") + hf_tokenizer.save_pretrained(pytorch_dump_folder_path) + + if push_to_repo: + model.push_to_hub(repo_id=push_to_repo, use_auth_token=auth_token) + hf_tokenizer.push_to_hub(repo_id=push_to_repo, use_auth_token=auth_token) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--pytorch_dump_folder_path", type=str, required=True, help="Path to the output PyTorch model." + ) + parser.add_argument( + "--classification_head", action="store_true", help="Whether to convert a final classification head." + ) + parser.add_argument("--model", default=None, type=str, required=True, help="Name of model to convert.") + parser.add_argument("--push_to_repo", type=str, help="Repo to upload to (including username!).") + parser.add_argument("--auth_token", type=str, help="HuggingFace auth token.") + args = parser.parse_args() + convert_esm_checkpoint_to_pytorch( + args.model, args.pytorch_dump_folder_path, args.classification_head, args.push_to_repo, args.auth_token + ) diff --git a/src/transformers/models/esm/modeling_esm.py b/src/transformers/models/esm/modeling_esm.py new file mode 100755 index 00000000000000..337f7e37165428 --- /dev/null +++ b/src/transformers/models/esm/modeling_esm.py @@ -0,0 +1,1241 @@ +# coding=utf-8 +# Copyright 2022 Facebook and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch ESM model.""" + +from typing import List, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN, gelu +from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward +from ...modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + MaskedLMOutput, + SequenceClassifierOutput, + TokenClassifierOutput, +) +from ...modeling_utils import ( + PreTrainedModel, + apply_chunking_to_forward, + find_pruneable_heads_and_indices, + prune_linear_layer, +) +from ...utils import logging +from .configuration_esm import EsmConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "facebook/esm-1b" +_CONFIG_FOR_DOC = "EsmConfig" +_TOKENIZER_FOR_DOC = "EsmTokenizer" + +ESM_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "facebook/esm-1b", + # See all ESM models at https://huggingface.co/models?filter=esm +] + + +def rotate_half(x): + x1, x2 = x.chunk(2, dim=-1) + return torch.cat((-x2, x1), dim=-1) + + +def apply_rotary_pos_emb(x, cos, sin): + cos = cos[:, :, : x.shape[-2], :] + sin = sin[:, :, : x.shape[-2], :] + + return (x * cos) + (rotate_half(x) * sin) + + +class RotaryEmbedding(torch.nn.Module): + """ + Rotary position embeddings based on those in + [RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer). Query and keys are transformed by rotation + matrices which depend on their relative positions. + """ + + def __init__(self, dim: int): + super().__init__() + # Generate and save the inverse frequency buffer (non trainable) + inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim)) + inv_freq = inv_freq + self.register_buffer("inv_freq", inv_freq) + + self._seq_len_cached = None + self._cos_cached = None + self._sin_cached = None + + def _update_cos_sin_tables(self, x, seq_dimension=2): + seq_len = x.shape[seq_dimension] + + # Reset the tables if the sequence length has changed, + # or if we're on a new device (possibly due to tracing for instance) + if seq_len != self._seq_len_cached or self._cos_cached.device != x.device: + self._seq_len_cached = seq_len + t = torch.arange(x.shape[seq_dimension], device=x.device).type_as(self.inv_freq) + freqs = torch.outer(t, self.inv_freq) + emb = torch.cat((freqs, freqs), dim=-1).to(x.device) + + self._cos_cached = emb.cos()[None, None, :, :] + self._sin_cached = emb.sin()[None, None, :, :] + + return self._cos_cached, self._sin_cached + + def forward(self, q: torch.Tensor, k: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + self._cos_cached, self._sin_cached = self._update_cos_sin_tables(k, seq_dimension=-2) + + return ( + apply_rotary_pos_emb(q, self._cos_cached, self._sin_cached), + apply_rotary_pos_emb(k, self._cos_cached, self._sin_cached), + ) + + +class EsmEmbeddings(nn.Module): + """ + Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. + """ + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + + if config.emb_layer_norm_before: + self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + else: + self.layer_norm = None + self.dropout = nn.Dropout(config.hidden_dropout_prob) + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) + + self.padding_idx = config.pad_token_id + self.position_embeddings = nn.Embedding( + config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx + ) + self.token_dropout = config.token_dropout + self.mask_token_id = config.mask_token_id + + def forward( + self, input_ids=None, attention_mask=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 + ): + if position_ids is None: + if input_ids is not None: + # Create the position ids from the input token ids. Any padded tokens remain padded. + position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length) + else: + position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + + # Note that if we want to support ESM-1 (not 1b!) in future then we need to support an + # embedding_scale factor here. + embeddings = inputs_embeds + + # Matt: ESM has the option to handle masking in MLM in a slightly unusual way. If the token_dropout + # flag is False then it is handled in the same was as BERT/RoBERTa. If it is set to True, however, + # masked tokens are treated as if they were selected for input dropout and zeroed out. + # This "mask-dropout" is compensated for when masked tokens are not present, by scaling embeddings by + # a factor of (fraction of unmasked tokens during training) / (fraction of unmasked tokens in sample). + # This is analogous to the way that dropout layers scale down outputs during evaluation when not + # actually dropping out values (or, equivalently, scale up their un-dropped outputs in training). + if self.token_dropout: + embeddings.masked_fill_((input_ids == self.mask_token_id).unsqueeze(-1), 0.0) + mask_ratio_train = 0.15 * 0.8 # Hardcoded as the ratio used in all ESM model training runs + src_lengths = attention_mask.sum(-1) + mask_ratio_observed = (input_ids == self.mask_token_id).sum(-1).float() / src_lengths + embeddings = embeddings * (1 - mask_ratio_train) / (1 - mask_ratio_observed)[:, None, None] + + if self.position_embedding_type == "absolute": + position_embeddings = self.position_embeddings(position_ids) + embeddings += position_embeddings + + if self.layer_norm is not None: + embeddings = self.layer_norm(embeddings) + if attention_mask is not None: + embeddings = embeddings * attention_mask.unsqueeze(-1) + # Matt: I think this line was copied incorrectly from BERT, disabling it for now. + # embeddings = self.dropout(embeddings) + return embeddings + + def create_position_ids_from_inputs_embeds(self, inputs_embeds): + """ + We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. + + Args: + inputs_embeds: torch.Tensor + + Returns: torch.Tensor + """ + input_shape = inputs_embeds.size()[:-1] + sequence_length = input_shape[1] + + position_ids = torch.arange( + self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device + ) + return position_ids.unsqueeze(0).expand(input_shape) + + +class EsmSelfAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads})" + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = position_embedding_type or getattr( + config, "position_embedding_type", "absolute" + ) + self.rotary_embeddings = None + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) + elif self.position_embedding_type == "rotary": + self.rotary_embeddings = RotaryEmbedding(dim=self.attention_head_size) + + self.is_decoder = config.is_decoder + + def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. + is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_layer = past_key_value[0] + value_layer = past_key_value[1] + attention_mask = encoder_attention_mask + elif is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + # Matt: Our BERT model (which this code was derived from) scales attention logits down by sqrt(head_dim). + # ESM scales the query down by the same factor instead. Modulo numerical stability these are equivalent, + # but not when rotary embeddings get involved. Therefore, we scale the query here to match the original + # ESM code and fix rotary embeddings. + query_layer = query_layer * self.attention_head_size**-0.5 + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_layer, value_layer) + + if self.position_embedding_type == "rotary": + query_layer, key_layer = self.rotary_embeddings(query_layer, key_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + seq_length = hidden_states.size()[1] + position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key + + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in EsmModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + if self.is_decoder: + outputs = outputs + (past_key_value,) + return outputs + + +class EsmSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states += input_tensor + return hidden_states + + +class EsmAttention(nn.Module): + def __init__(self, config): + super().__init__() + self.self = EsmSelfAttention(config) + self.output = EsmSelfOutput(config) + self.pruned_heads = set() + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + hidden_states_ln = self.LayerNorm(hidden_states) + self_outputs = self.self( + hidden_states_ln, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertIntermediate +class EsmIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class EsmOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states += input_tensor + return hidden_states + + +class EsmLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = EsmAttention(config) + self.is_decoder = config.is_decoder + self.add_cross_attention = config.add_cross_attention + if self.add_cross_attention: + if not self.is_decoder: + raise RuntimeError(f"{self} should be used as a decoder model if cross attention is added") + self.crossattention = EsmAttention(config) + self.intermediate = EsmIntermediate(config) + self.output = EsmOutput(config) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=None, + output_attentions=False, + ): + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + + # if decoder, the last output is tuple of self-attn cache + if self.is_decoder: + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + else: + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + cross_attn_present_key_value = None + if self.is_decoder and encoder_hidden_states is not None: + if not hasattr(self, "crossattention"): + raise AttributeError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated" + " with cross-attention layers by setting `config.add_cross_attention=True`" + ) + + # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + cross_attn_past_key_value, + output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights + + # add cross-attn cache to positions 3,4 of present_key_value tuple + cross_attn_present_key_value = cross_attention_outputs[-1] + present_key_value = present_key_value + cross_attn_present_key_value + + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output + ) + outputs = (layer_output,) + outputs + + # if decoder, return the attn key/values as the last output + if self.is_decoder: + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + attention_output_ln = self.LayerNorm(attention_output) + intermediate_output = self.intermediate(attention_output_ln) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +class EsmEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList([EsmLayer(config) for _ in range(config.num_hidden_layers)]) + self.emb_layer_norm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_values=None, + use_cache=None, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + ): + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + + next_decoder_cache = () if use_cache else None + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + if use_cache: + logger.warning( + "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting " + "`use_cache=False`..." + ) + use_cache = False + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, past_key_value, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + if self.config.add_cross_attention: + all_cross_attentions = all_cross_attentions + (layer_outputs[2],) + + if self.emb_layer_norm_after: + hidden_states = self.emb_layer_norm_after(hidden_states) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +# Copied from transformers.models.bert.modeling_bert.BertPooler +class EsmPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class EsmPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = EsmConfig + base_model_prefix = "esm" + + # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, nn.Linear): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + def update_keys_to_ignore(self, config, del_keys_to_ignore): + """Remove some keys from ignore list""" + if not config.tie_word_embeddings: + # must make a new list, or the class variable gets modified! + self._keys_to_ignore_on_save = [k for k in self._keys_to_ignore_on_save if k not in del_keys_to_ignore] + self._keys_to_ignore_on_load_missing = [ + k for k in self._keys_to_ignore_on_load_missing if k not in del_keys_to_ignore + ] + + +ESM_START_DOCSTRING = r""" + + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`EsmConfig`]): Model configuration class with all the parameters of the + model. Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +ESM_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`EsmTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + position_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare ESM Model transformer outputting raw hidden-states without any specific head on top.", + ESM_START_DOCSTRING, +) +class EsmModel(EsmPreTrainedModel): + """ + + The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of + cross-attention is added between the self-attention layers, following the architecture described in [Attention is + all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, + Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. + + To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set + to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and + `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. + """ + + _keys_to_ignore_on_load_missing = [r"position_ids"] + supports_gradient_checkpointing = False + + # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Esm + def __init__(self, config, add_pooling_layer=True): + super().__init__(config) + self.config = config + + self.embeddings = EsmEmbeddings(config) + self.encoder = EsmEncoder(config) + + self.pooler = EsmPooler(config) if add_pooling_layer else None + + # Initialize weights and apply final processing + self.post_init() + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, EsmEncoder): + module.gradient_checkpointing = value + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("(batch_size, sequence_length)")) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithPoolingAndCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: + r""" + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if self.config.is_decoder: + use_cache = use_cache if use_cache is not None else self.config.use_cache + else: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + batch_size, seq_length = input_shape + device = input_ids.device if input_ids is not None else inputs_embeds.device + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if attention_mask is None: + attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. + extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if self.config.is_decoder and encoder_hidden_states is not None: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + if encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + ) + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = encoder_outputs[0] + pooled_output = self.pooler(sequence_output) if self.pooler is not None else None + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + +@add_start_docstrings("""ESM Model with a `language modeling` head on top.""", ESM_START_DOCSTRING) +class EsmForMaskedLM(EsmPreTrainedModel): + _keys_to_ignore_on_save = [r"lm_head.decoder.weight", r"lm_head.decoder.bias"] + _keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.weight", r"lm_head.decoder.bias"] + _keys_to_ignore_on_load_unexpected = [r"pooler"] + + def __init__(self, config): + super().__init__(config) + + if config.is_decoder: + logger.warning( + "If you want to use `EsmForMaskedLM` make sure `config.is_decoder=False` for " + "bi-directional self-attention." + ) + + self.esm = EsmModel(config, add_pooling_layer=False) + self.lm_head = EsmLMHead(config) + + # The LM head weights require special treatment only when they are tied with the word embeddings + self.update_keys_to_ignore(config, ["lm_head.decoder.weight"]) + + self.init_weights() + + def get_output_embeddings(self): + return self.lm_head.decoder + + def set_output_embeddings(self, new_embeddings): + self.lm_head.decoder = new_embeddings + + @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=MaskedLMOutput, + config_class=_CONFIG_FOR_DOC, + mask="", + ) + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., + config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the + loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + kwargs (`Dict[str, any]`, optional, defaults to *{}*): + Used to hide legacy arguments that have been deprecated. + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.esm( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = outputs[0] + prediction_scores = self.lm_head(sequence_output) + + masked_lm_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output + + return MaskedLMOutput( + loss=masked_lm_loss, + logits=prediction_scores, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +class EsmLMHead(nn.Module): + """ESM Head for masked language modeling.""" + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + + # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` + self.decoder.bias = self.bias + + def forward(self, features, **kwargs): + x = self.dense(features) + x = gelu(x) + x = self.layer_norm(x) + + # project back to size of vocabulary with bias + x = self.decoder(x) + + return x + + +@add_start_docstrings( + """ + ESM Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled + output) e.g. for GLUE tasks. + """, + ESM_START_DOCSTRING, +) +class EsmForSequenceClassification(EsmPreTrainedModel): + _keys_to_ignore_on_load_missing = [r"position_ids"] + + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.config = config + + self.esm = EsmModel(config, add_pooling_layer=False) + self.classifier = EsmClassificationHead(config) + + self.init_weights() + + @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=SequenceClassifierOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.esm( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = outputs[0] + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + ESM Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for + Named-Entity-Recognition (NER) tasks. + """, + ESM_START_DOCSTRING, +) +class EsmForTokenClassification(EsmPreTrainedModel): + _keys_to_ignore_on_load_unexpected = [r"pooler"] + _keys_to_ignore_on_load_missing = [r"position_ids"] + + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.esm = EsmModel(config, add_pooling_layer=False) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + self.init_weights() + + @add_start_docstrings_to_model_forward(ESM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TokenClassifierOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + labels=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.esm( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + sequence_output = self.dropout(sequence_output) + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + # Only keep active parts of the loss + if attention_mask is not None: + active_loss = attention_mask.view(-1) == 1 + active_logits = logits.view(-1, self.num_labels) + active_labels = torch.where( + active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) + ) + loss = loss_fct(active_logits, active_labels) + else: + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +class EsmClassificationHead(nn.Module): + """Head for sentence-level classification tasks.""" + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.out_proj = nn.Linear(config.hidden_size, config.num_labels) + + def forward(self, features, **kwargs): + x = features[:, 0, :] # take token (equiv. to [CLS]) + x = self.dropout(x) + x = self.dense(x) + x = torch.tanh(x) + x = self.dropout(x) + x = self.out_proj(x) + return x + + +def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): + """ + Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols + are ignored. This is modified from fairseq's `utils.make_positions`. + + Args: + x: torch.Tensor x: + + Returns: torch.Tensor + """ + # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. + mask = input_ids.ne(padding_idx).int() + incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask + return incremental_indices.long() + padding_idx diff --git a/src/transformers/models/esm/tokenization_esm.py b/src/transformers/models/esm/tokenization_esm.py new file mode 100644 index 00000000000000..0512ccf8645784 --- /dev/null +++ b/src/transformers/models/esm/tokenization_esm.py @@ -0,0 +1,106 @@ +# coding=utf-8 +# Copyright Facebook and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tokenization classes for ESM.""" +import os +from typing import List, Optional, Union + +from ...tokenization_utils import PreTrainedTokenizer +from ...tokenization_utils_base import AddedToken +from ...utils import logging + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} + +PRETRAINED_VOCAB_FILES_MAP = { + "vocab_file": { + "facebook/esm1b": "https://huggingface.co/facebook/esm1b/resolve/main/vocab.txt", + }, +} + +PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { + "facebook/esm1b": 1024, +} + + +def load_vocab_file(vocab_file): + with open(vocab_file, "r") as f: + lines = f.read().splitlines() + return [l.strip() for l in lines] + + +class EsmTokenizer(PreTrainedTokenizer): + """ + Constructs an ESM tokenizer. + """ + + vocab_files_names = VOCAB_FILES_NAMES + pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + model_input_names = ["input_ids", "attention_mask"] + + def __init__(self, vocab_file, **kwargs): + super().__init__(**kwargs) + self.all_tokens = load_vocab_file(vocab_file) + self._id_to_token = {ind: tok for ind, tok in enumerate(self.all_tokens)} + self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)} + self.unk_token = "" + self.cls_token = "" + self.pad_token = "" + self.mask_token = "" + self.eos_token = "" + self.unique_no_split_tokens = self.all_tokens + self._create_trie(self.unique_no_split_tokens) + + def _convert_id_to_token(self, index: int) -> str: + return self._id_to_token.get(index, self.unk_token) + + def _convert_token_to_id(self, token: str) -> int: + return self._token_to_id.get(token, self._token_to_id.get(self.unk_token)) + + def _tokenize(self, text, **kwargs): + return text.split() + + def get_vocab_size(self, with_added_tokens=False): + return len(self._id_to_token) + + def token_to_id(self, token: str) -> int: + return self._token_to_id.get(token, self._token_to_id.get(self.unk_token)) + + def id_to_token(self, index: int) -> str: + return self._id_to_token.get(index, self.unk_token) + + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + if token_ids_1 is not None: + raise ValueError("Multiple input sentences are not supported!") + cls_: List[int] = [self.cls_token_id] + eos_: List[int] = [self.eos_token_id] + return cls_ + token_ids_0 + eos_ + + def save_vocabulary(self, save_directory, filename_prefix): + vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt") + with open(vocab_file, "w") as f: + f.write("\n".join(self.all_tokens)) + return (vocab_file,) + + @property + def vocab_size(self) -> int: + return self.get_vocab_size(with_added_tokens=False) + + def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int: + return super()._add_tokens(new_tokens, special_tokens=True) diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index d564c08e9fc5cc..769cc4c4b346b5 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -1948,6 +1948,44 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +ESM_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class EsmForMaskedLM(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class EsmForSequenceClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class EsmForTokenClassification(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class EsmModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class EsmPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/tests/models/esm/__init__.py b/tests/models/esm/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/tests/models/esm/test_modeling_esm.py b/tests/models/esm/test_modeling_esm.py new file mode 100644 index 00000000000000..7bd0a36c8b0cb8 --- /dev/null +++ b/tests/models/esm/test_modeling_esm.py @@ -0,0 +1,293 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Testing suite for the PyTorch ESM model. """ + + +import unittest + +from transformers import EsmConfig, is_torch_available +from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device + +from ...generation.test_generation_utils import GenerationTesterMixin +from ...test_configuration_common import ConfigTester +from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask + + +if is_torch_available(): + import torch + + from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel + from transformers.models.esm.modeling_esm import ( + ESM_PRETRAINED_MODEL_ARCHIVE_LIST, + EsmEmbeddings, + create_position_ids_from_input_ids, + ) + + +# copied from tests.test_modeling_roberta +class EsmModelTester: + def __init__( + self, + parent, + ): + self.parent = parent + self.batch_size = 13 + self.seq_length = 7 + self.is_training = False + self.use_input_mask = True + self.use_token_type_ids = False + self.use_labels = True + self.vocab_size = 99 + self.hidden_size = 32 + self.num_hidden_layers = 5 + self.num_attention_heads = 4 + self.intermediate_size = 37 + self.hidden_act = "gelu" + self.hidden_dropout_prob = 0.1 + self.attention_probs_dropout_prob = 0.1 + self.max_position_embeddings = 512 + self.type_vocab_size = 16 + self.type_sequence_label_size = 2 + self.initializer_range = 0.02 + self.num_labels = 3 + self.num_choices = 4 + self.scope = None + + def prepare_config_and_inputs(self): + input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) + + input_mask = None + if self.use_input_mask: + input_mask = random_attention_mask([self.batch_size, self.seq_length]) + + sequence_labels = None + token_labels = None + choice_labels = None + if self.use_labels: + sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) + token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) + choice_labels = ids_tensor([self.batch_size], self.num_choices) + + config = self.get_config() + + return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels + + def get_config(self): + return EsmConfig( + vocab_size=self.vocab_size, + hidden_size=self.hidden_size, + pad_token_id=1, + num_hidden_layers=self.num_hidden_layers, + num_attention_heads=self.num_attention_heads, + intermediate_size=self.intermediate_size, + hidden_act=self.hidden_act, + hidden_dropout_prob=self.hidden_dropout_prob, + attention_probs_dropout_prob=self.attention_probs_dropout_prob, + max_position_embeddings=self.max_position_embeddings, + type_vocab_size=self.type_vocab_size, + initializer_range=self.initializer_range, + ) + + def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels): + model = EsmModel(config=config) + model.to(torch_device) + model.eval() + result = model(input_ids, attention_mask=input_mask) + result = model(input_ids) + result = model(input_ids) + + self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) + self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) + + def create_and_check_for_masked_lm( + self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels + ): + model = EsmForMaskedLM(config=config) + model.to(torch_device) + model.eval() + result = model(input_ids, attention_mask=input_mask, labels=token_labels) + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) + + def create_and_check_for_token_classification( + self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels + ): + config.num_labels = self.num_labels + model = EsmForTokenClassification(config=config) + model.to(torch_device) + model.eval() + result = model(input_ids, attention_mask=input_mask, labels=token_labels) + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) + + def prepare_config_and_inputs_for_common(self): + config_and_inputs = self.prepare_config_and_inputs() + ( + config, + input_ids, + input_mask, + sequence_labels, + token_labels, + choice_labels, + ) = config_and_inputs + inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} + return config, inputs_dict + + +@require_torch +class EsmModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): + + test_mismatched_shapes = False + + all_model_classes = ( + ( + EsmForMaskedLM, + EsmModel, + EsmForSequenceClassification, + EsmForTokenClassification, + ) + if is_torch_available() + else () + ) + all_generative_model_classes = () + test_sequence_classification_problem_types = True + + def setUp(self): + self.model_tester = EsmModelTester(self) + self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37) + + def test_config(self): + self.config_tester.run_common_tests() + + def test_model(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model(*config_and_inputs) + + def test_model_various_embeddings(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + for type in ["absolute", "relative_key", "relative_key_query"]: + config_and_inputs[0].position_embedding_type = type + self.model_tester.create_and_check_model(*config_and_inputs) + + def test_for_masked_lm(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) + + def test_for_token_classification(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_for_token_classification(*config_and_inputs) + + @slow + def test_model_from_pretrained(self): + for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: + model = EsmModel.from_pretrained(model_name) + self.assertIsNotNone(model) + + def test_create_position_ids_respects_padding_index(self): + """Ensure that the default position ids only assign a sequential . This is a regression + test for https://github.com/huggingface/transformers/issues/1761 + + The position ids should be masked with the embedding object's padding index. Therefore, the + first available non-padding position index is EsmEmbeddings.padding_idx + 1 + """ + config = self.model_tester.prepare_config_and_inputs()[0] + model = EsmEmbeddings(config=config) + + input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]]) + expected_positions = torch.as_tensor( + [ + [ + 0 + model.padding_idx + 1, + 1 + model.padding_idx + 1, + 2 + model.padding_idx + 1, + model.padding_idx, + ] + ] + ) + position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx) + self.assertEqual(position_ids.shape, expected_positions.shape) + self.assertTrue(torch.all(torch.eq(position_ids, expected_positions))) + + def test_create_position_ids_from_inputs_embeds(self): + """Ensure that the default position ids only assign a sequential . This is a regression + test for https://github.com/huggingface/transformers/issues/1761 + + The position ids should be masked with the embedding object's padding index. Therefore, the + first available non-padding position index is EsmEmbeddings.padding_idx + 1 + """ + config = self.model_tester.prepare_config_and_inputs()[0] + embeddings = EsmEmbeddings(config=config) + + inputs_embeds = torch.empty(2, 4, 30) + expected_single_positions = [ + 0 + embeddings.padding_idx + 1, + 1 + embeddings.padding_idx + 1, + 2 + embeddings.padding_idx + 1, + 3 + embeddings.padding_idx + 1, + ] + expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions]) + position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds) + self.assertEqual(position_ids.shape, expected_positions.shape) + self.assertTrue(torch.all(torch.eq(position_ids, expected_positions))) + + +@require_torch +class EsmModelIntegrationTest(TestCasePlus): + @slow + def test_inference_masked_lm(self): + model = EsmForMaskedLM.from_pretrained("Rocketknight1/esm-2-8m") + input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]]) + output = model(input_ids)[0] + + vocab_size = 33 + + expected_shape = torch.Size((1, 6, vocab_size)) + self.assertEqual(output.shape, expected_shape) + + expected_slice = torch.tensor( + [[[15.0973, -6.6406, -1.1351], [-0.2209, -9.9622, 4.2109], [-1.6055, -10.0023, 1.5914]]] + ) + self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) + + @slow + def test_inference_no_head(self): + model = EsmModel.from_pretrained("Rocketknight1/esm-2-8m") + + input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]]) + output = model(input_ids)[0] + # compare the actual values for a slice. + expected_slice = torch.tensor( + [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] + ) + self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) + + def test_lm_head_ignore_keys(self): + from copy import deepcopy + + keys_to_ignore_on_save_tied = [r"lm_head.decoder.weight", r"lm_head.decoder.bias"] + keys_to_ignore_on_save_untied = [r"lm_head.decoder.bias"] + config = EsmConfig.from_pretrained("Rocketknight1/esm-2-8m") + config_tied = deepcopy(config) + config_tied.tie_word_embeddings = True + config_untied = deepcopy(config) + config_untied.tie_word_embeddings = False + for cls in [EsmForMaskedLM]: + model = cls(config_tied) + self.assertEqual(model._keys_to_ignore_on_save, keys_to_ignore_on_save_tied, cls) + + # the keys should be different when embeddings aren't tied + model = cls(config_untied) + self.assertEqual(model._keys_to_ignore_on_save, keys_to_ignore_on_save_untied, cls) + + # test that saving works with updated ignore keys - just testing that it doesn't fail + model.save_pretrained(self.get_auto_remove_tmp_dir()) diff --git a/tests/models/esm/test_tokenization_esm.py b/tests/models/esm/test_tokenization_esm.py new file mode 100644 index 00000000000000..242f6d77081ff5 --- /dev/null +++ b/tests/models/esm/test_tokenization_esm.py @@ -0,0 +1,91 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import os +import tempfile +import unittest +from typing import List + +from transformers.models.esm.tokenization_esm import VOCAB_FILES_NAMES, EsmTokenizer +from transformers.testing_utils import require_tokenizers +from transformers.tokenization_utils import PreTrainedTokenizer +from transformers.tokenization_utils_base import PreTrainedTokenizerBase + + +@require_tokenizers +class ESMTokenizationTest(unittest.TestCase): + tokenizer_class = EsmTokenizer + + def setUp(self): + super().setUp() + self.tmpdirname = tempfile.mkdtemp() + # fmt: off + vocab_tokens: List[str] = ["", "", "", "", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "", ""] # noqa: E501 + # fmt: on + self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) + with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: + vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) + + def get_tokenizers(self, **kwargs) -> List[PreTrainedTokenizerBase]: + return [self.get_tokenizer(**kwargs)] + + def get_tokenizer(self, **kwargs) -> PreTrainedTokenizer: + return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) + + def test_tokenizer_single_example(self): + tokenizer = self.tokenizer_class(self.vocab_file) + + tokens = tokenizer.tokenize("LAGVS") + self.assertListEqual(tokens, ["L", "A", "G", "V", "S"]) + self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [4, 5, 6, 7, 8]) + + def test_tokenizer_encode_single(self): + tokenizer = self.tokenizer_class(self.vocab_file) + + seq = "LAGVS" + self.assertListEqual(tokenizer.encode(seq), [0, 4, 5, 6, 7, 8, 2]) + + def test_tokenizer_call_no_pad(self): + tokenizer = self.tokenizer_class(self.vocab_file) + + seq_batch = ["LAGVS", "WCB"] + tokens_batch = tokenizer(seq_batch, padding=False)["input_ids"] + + self.assertListEqual(tokens_batch, [[0, 4, 5, 6, 7, 8, 2], [0, 22, 23, 25, 2]]) + + def test_tokenizer_call_pad(self): + tokenizer = self.tokenizer_class(self.vocab_file) + + seq_batch = ["LAGVS", "WCB"] + tokens_batch = tokenizer(seq_batch, padding=True)["input_ids"] + + self.assertListEqual(tokens_batch, [[0, 4, 5, 6, 7, 8, 2], [0, 22, 23, 25, 2, 1, 1]]) + + def test_tokenize_special_tokens(self): + """Test `tokenize` with special tokens.""" + tokenizers = self.get_tokenizers(fast=True) + for tokenizer in tokenizers: + with self.subTest(f"{tokenizer.__class__.__name__}"): + SPECIAL_TOKEN_1 = "" + SPECIAL_TOKEN_2 = "" + + token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1) + token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2) + + self.assertEqual(len(token_1), 1) + self.assertEqual(len(token_2), 1) + self.assertEqual(token_1[0], SPECIAL_TOKEN_1) + self.assertEqual(token_2[0], SPECIAL_TOKEN_2) From 582d085bb2c54e20907bfdfae24d0e9e37070ca6 Mon Sep 17 00:00:00 2001 From: Sayak Paul Date: Fri, 30 Sep 2022 18:55:41 +0530 Subject: [PATCH 427/539] Add expected output to the sample code for `ViTMSNForImageClassification` (#19183) * chore: add expected output to the sample code. * add: imagenet-1k labels to the model config. * chore: apply code formatting. * chore: change the expected output. --- .../models/vit_msn/convert_msn_to_pytorch.py | 9 +++++++++ src/transformers/models/vit_msn/modeling_vit_msn.py | 3 +++ 2 files changed, 12 insertions(+) diff --git a/src/transformers/models/vit_msn/convert_msn_to_pytorch.py b/src/transformers/models/vit_msn/convert_msn_to_pytorch.py index 535f5f742d631f..f04d26d5eb886a 100644 --- a/src/transformers/models/vit_msn/convert_msn_to_pytorch.py +++ b/src/transformers/models/vit_msn/convert_msn_to_pytorch.py @@ -15,11 +15,13 @@ """Convert ViT MSN checkpoints from the original repository: https://github.com/facebookresearch/msn""" import argparse +import json import torch from PIL import Image import requests +from huggingface_hub import hf_hub_download from transformers import ViTFeatureExtractor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD @@ -147,6 +149,13 @@ def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path): config = ViTMSNConfig() config.num_labels = 1000 + repo_id = "datasets/huggingface/label-files" + filename = "imagenet-1k-id2label.json" + id2label = json.load(open(hf_hub_download(repo_id, filename), "r")) + id2label = {int(k): v for k, v in id2label.items()} + config.id2label = id2label + config.label2id = {v: k for k, v in id2label.items()} + if "s16" in checkpoint_url: config.hidden_size = 384 config.intermediate_size = 1536 diff --git a/src/transformers/models/vit_msn/modeling_vit_msn.py b/src/transformers/models/vit_msn/modeling_vit_msn.py index a190c42caa707b..f40d5278c06be1 100644 --- a/src/transformers/models/vit_msn/modeling_vit_msn.py +++ b/src/transformers/models/vit_msn/modeling_vit_msn.py @@ -632,6 +632,8 @@ def forward( >>> from PIL import Image >>> import requests + >>> torch.manual_seed(2) + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) @@ -644,6 +646,7 @@ def forward( >>> # model predicts one of the 1000 ImageNet classes >>> predicted_label = logits.argmax(-1).item() >>> print(model.config.id2label[predicted_label]) + Kerry blue terrier ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict From e396358104a3631be42617168c09fa8894148b0f Mon Sep 17 00:00:00 2001 From: Karim Foda <35491698+KMFODA@users.noreply.github.com> Date: Fri, 30 Sep 2022 16:26:51 +0300 Subject: [PATCH 428/539] Add stop sequence to text generation pipeline (#18444) --- src/transformers/generation_utils.py | 1 - .../pipelines/text2text_generation.py | 11 ++++++++++ src/transformers/pipelines/text_generation.py | 11 ++++++++++ tests/generation/test_generation_utils.py | 20 +++++++++++++++++++ .../test_pipelines_text_generation.py | 12 +++++++++++ 5 files changed, 54 insertions(+), 1 deletion(-) diff --git a/src/transformers/generation_utils.py b/src/transformers/generation_utils.py index 71db5532ea38de..79460c1cad25c7 100644 --- a/src/transformers/generation_utils.py +++ b/src/transformers/generation_utils.py @@ -1343,7 +1343,6 @@ def generate( stopping_criteria = self._get_stopping_criteria( max_length=max_length, max_time=max_time, stopping_criteria=stopping_criteria ) - # 9. go into different generation modes if is_greedy_gen_mode: if num_return_sequences > 1: diff --git a/src/transformers/pipelines/text2text_generation.py b/src/transformers/pipelines/text2text_generation.py index 97cbc1a395d4f5..2247e57929bbb1 100644 --- a/src/transformers/pipelines/text2text_generation.py +++ b/src/transformers/pipelines/text2text_generation.py @@ -1,4 +1,5 @@ import enum +import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging @@ -59,6 +60,7 @@ def _sanitize_parameters( return_type=None, clean_up_tokenization_spaces=None, truncation=None, + stop_sequence=None, **generate_kwargs ): preprocess_params = {} @@ -76,6 +78,15 @@ def _sanitize_parameters( if clean_up_tokenization_spaces is not None: postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces + if stop_sequence is not None: + stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False) + if len(stop_sequence_ids) > 1: + warnings.warn( + "Stopping on a multiple token sequence is not yet supported on transformers. The first token of" + " the stop sequence will be used as the stop sequence string in the interim." + ) + generate_kwargs["eos_token_id"] = stop_sequence_ids[0] + return preprocess_params, forward_params, postprocess_params def check_inputs(self, input_length: int, min_length: int, max_length: int): diff --git a/src/transformers/pipelines/text_generation.py b/src/transformers/pipelines/text_generation.py index 7d15316492b9d4..4cb78c9bbe77f6 100644 --- a/src/transformers/pipelines/text_generation.py +++ b/src/transformers/pipelines/text_generation.py @@ -1,4 +1,5 @@ import enum +import warnings from transformers import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING @@ -80,6 +81,7 @@ def _sanitize_parameters( clean_up_tokenization_spaces=None, prefix=None, handle_long_generation=None, + stop_sequence=None, **generate_kwargs ): preprocess_params = {} @@ -121,6 +123,15 @@ def _sanitize_parameters( if clean_up_tokenization_spaces is not None: postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces + if stop_sequence is not None: + stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False) + if len(stop_sequence_ids) > 1: + warnings.warn( + "Stopping on a multiple token sequence is not yet supported on transformers. The first token of" + " the stop sequence will be used as the stop sequence string in the interim." + ) + generate_kwargs["eos_token_id"] = stop_sequence_ids[0] + return preprocess_params, forward_params, postprocess_params # overriding _parse_and_tokenize to allow for unusual language-modeling tokenizer arguments diff --git a/tests/generation/test_generation_utils.py b/tests/generation/test_generation_utils.py index e8cb57ccf3c97d..f48cfff83cb85a 100644 --- a/tests/generation/test_generation_utils.py +++ b/tests/generation/test_generation_utils.py @@ -37,6 +37,7 @@ Speech2TextForConditionalGeneration, SpeechEncoderDecoderModel, VisionEncoderDecoderModel, + pipeline, top_k_top_p_filtering, ) from transformers.generation_beam_constraints import DisjunctiveConstraint, PhrasalConstraint @@ -1979,6 +1980,25 @@ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwa [1, 18], ) + def test_stop_sequence_stopping_criteria(self): + + prompt = """Hello I believe in""" + generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-bart") + output = generator(prompt) + self.assertEqual( + output, + [ + { + "generated_text": ( + "Hello I believe in in in number number number number number number number number number" + ) + } + ], + ) + + output = generator(prompt, stop_sequence=" number") + self.assertEqual(output, [{"generated_text": "Hello I believe in in in number"}]) + def test_custom_logits_processor(self): bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random") article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" diff --git a/tests/pipelines/test_pipelines_text_generation.py b/tests/pipelines/test_pipelines_text_generation.py index a26ed56d4cd491..ac6d122559ee5d 100644 --- a/tests/pipelines/test_pipelines_text_generation.py +++ b/tests/pipelines/test_pipelines_text_generation.py @@ -147,6 +147,18 @@ def get_test_pipeline(self, model, tokenizer, feature_extractor): text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer) return text_generator, ["This is a test", "Another test"] + def test_stop_sequence_stopping_criteria(self): + prompt = """Hello I believe in""" + text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2") + output = text_generator(prompt) + self.assertEqual( + output, + [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}], + ) + + output = text_generator(prompt, stop_sequence=" fe") + self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}]) + def run_pipeline_test(self, text_generator, _): model = text_generator.model tokenizer = text_generator.tokenizer From dad578e4c36fa4611c0e63cbbdfb1f3dc4145078 Mon Sep 17 00:00:00 2001 From: Jingya HUANG <44135271+JingyaHuang@users.noreply.github.com> Date: Fri, 30 Sep 2022 16:04:36 +0200 Subject: [PATCH 429/539] Add notebooks (#19259) --- notebooks/README.md | 80 +++++++++++++++++++++++---------------------- 1 file changed, 41 insertions(+), 39 deletions(-) diff --git a/notebooks/README.md b/notebooks/README.md index 1a25cdd8044d14..fde9001791ac68 100644 --- a/notebooks/README.md +++ b/notebooks/README.md @@ -18,9 +18,9 @@ limitations under the License. You can find here a list of the official notebooks provided by Hugging Face. -Also, we would like to list here interesting content created by the community. -If you wrote some notebook(s) leveraging 🤗 Transformers and would like be listed here, please open a -Pull Request so it can be included under the Community notebooks. +Also, we would like to list here interesting content created by the community. +If you wrote some notebook(s) leveraging 🤗 Transformers and would like be listed here, please open a +Pull Request so it can be included under the Community notebooks. ## Hugging Face's notebooks 🤗 @@ -31,53 +31,53 @@ You can open any page of the documentation as a notebook in colab (there is a bu | Notebook | Description | | | |:----------|:-------------|:-------------|------:| -| [Quicktour of the library](https://github.com/huggingface/notebooks/blob/main/transformers_doc/en/quicktour.ipynb) | A presentation of the various APIs in Transformers |[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/quicktour.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/en/transformers_doc/quicktour.ipynb)| -| [Summary of the tasks](https://github.com/huggingface/notebooks/blob/main/transformers_doc/en/task_summary.ipynb) | How to run the models of the Transformers library task by task |[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/task_summary.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/en/task_summary.ipynb)| -| [Preprocessing data](https://github.com/huggingface/notebooks/blob/main/transformers_doc/en/preprocessing.ipynb) | How to use a tokenizer to preprocess your data |[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/preprocessing.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/en/preprocessing.ipynb)| -| [Fine-tuning a pretrained model](https://github.com/huggingface/notebooks/blob/main/transformers_doc/en/training.ipynb) | How to use the Trainer to fine-tune a pretrained model |[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/training.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/en/training.ipynb)| -| [Summary of the tokenizers](https://github.com/huggingface/notebooks/blob/main/transformers_doc/en/tokenizer_summary.ipynb) | The differences between the tokenizers algorithm |[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/tokenizer_summary.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/en/tokenizer_summary.ipynb)| -| [Multilingual models](https://github.com/huggingface/notebooks/blob/main/transformers_doc/en/multilingual.ipynb) | How to use the multilingual models of the library |[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/multilingual.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/en/multilingual.ipynb)| +| [Quicktour of the library](https://github.com/huggingface/notebooks/blob/main/transformers_doc/en/quicktour.ipynb) | A presentation of the various APIs in Transformers |[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/quicktour.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/en/transformers_doc/quicktour.ipynb)| +| [Summary of the tasks](https://github.com/huggingface/notebooks/blob/main/transformers_doc/en/task_summary.ipynb) | How to run the models of the Transformers library task by task |[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/task_summary.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/en/task_summary.ipynb)| +| [Preprocessing data](https://github.com/huggingface/notebooks/blob/main/transformers_doc/en/preprocessing.ipynb) | How to use a tokenizer to preprocess your data |[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/preprocessing.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/en/preprocessing.ipynb)| +| [Fine-tuning a pretrained model](https://github.com/huggingface/notebooks/blob/main/transformers_doc/en/training.ipynb) | How to use the Trainer to fine-tune a pretrained model |[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/training.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/en/training.ipynb)| +| [Summary of the tokenizers](https://github.com/huggingface/notebooks/blob/main/transformers_doc/en/tokenizer_summary.ipynb) | The differences between the tokenizers algorithm |[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/tokenizer_summary.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/en/tokenizer_summary.ipynb)| +| [Multilingual models](https://github.com/huggingface/notebooks/blob/main/transformers_doc/en/multilingual.ipynb) | How to use the multilingual models of the library |[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/multilingual.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/en/multilingual.ipynb)| ### PyTorch Examples | Notebook | Description | | | |:----------|:-------------|:-------------|------:| -| [Train your tokenizer](https://github.com/huggingface/notebooks/blob/main/examples/tokenizer_training.ipynb) | How to train and use your very own tokenizer |[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/tokenizer_training.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/tokenizer_training.ipynb)| -| [Train your language model](https://github.com/huggingface/notebooks/blob/main/examples/language_modeling_from_scratch.ipynb) | How to easily start using transformers |[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling_from_scratch.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/language_modeling_from_scratch.ipynb)| -| [How to fine-tune a model on text classification](https://github.com/huggingface/notebooks/blob/main/examples/text_classification.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on any GLUE task. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb)| -| [How to fine-tune a model on language modeling](https://github.com/huggingface/notebooks/blob/main/examples/language_modeling.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on a causal or masked LM task. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb)| -| [How to fine-tune a model on token classification](https://github.com/huggingface/notebooks/blob/main/examples/token_classification.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on a token classification task (NER, PoS). | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb)| -| [How to fine-tune a model on question answering](https://github.com/huggingface/notebooks/blob/main/examples/question_answering.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on SQUAD. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb)| -| [How to fine-tune a model on multiple choice](https://github.com/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on SWAG. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb)| -| [How to fine-tune a model on translation](https://github.com/huggingface/notebooks/blob/main/examples/translation.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on WMT. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/translation.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/translation.ipynb)| -| [How to fine-tune a model on summarization](https://github.com/huggingface/notebooks/blob/main/examples/summarization.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on XSUM. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/summarization.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/summarization.ipynb)| -| [How to fine-tune a speech recognition model in English](https://github.com/huggingface/notebooks/blob/main/examples/speech_recognition.ipynb)| Show how to preprocess the data and fine-tune a pretrained Speech model on TIMIT | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/speech_recognition.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/speech_recognition.ipynb)| -| [How to fine-tune a speech recognition model in any language](https://github.com/huggingface/notebooks/blob/main/examples/multi_lingual_speech_recognition.ipynb)| Show how to preprocess the data and fine-tune a multi-lingually pretrained speech model on Common Voice | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multi_lingual_speech_recognition.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/multi_lingual_speech_recognition.ipynb)| -| [How to fine-tune a model on audio classification](https://github.com/huggingface/notebooks/blob/main/examples/audio_classification.ipynb)| Show how to preprocess the data and fine-tune a pretrained Speech model on Keyword Spotting | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/audio_classification.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/audio_classification.ipynb)| -| [How to train a language model from scratch](https://github.com/huggingface/blog/blob/main/notebooks/01_how_to_train.ipynb)| Highlight all the steps to effectively train Transformer model on custom data | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/01_how_to_train.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/blog/blob/main/notebooks/01_how_to_train.ipynb)| -| [How to generate text](https://github.com/huggingface/blog/blob/main/notebooks/02_how_to_generate.ipynb)| How to use different decoding methods for language generation with transformers | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/02_how_to_generate.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/blog/blob/main/notebooks/02_how_to_generate.ipynb)| -| [How to generate text (with constraints)](https://github.com/huggingface/blog/blob/main/notebooks/53_constrained_beam_search.ipynb)| How to guide language generation with user-provided constraints | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/53_constrained_beam_search.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/blog/blob/main/notebooks/53_constrained_beam_search.ipynb)| +| [Train your tokenizer](https://github.com/huggingface/notebooks/blob/main/examples/tokenizer_training.ipynb) | How to train and use your very own tokenizer |[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/tokenizer_training.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/tokenizer_training.ipynb)| +| [Train your language model](https://github.com/huggingface/notebooks/blob/main/examples/language_modeling_from_scratch.ipynb) | How to easily start using transformers |[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling_from_scratch.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/language_modeling_from_scratch.ipynb)| +| [How to fine-tune a model on text classification](https://github.com/huggingface/notebooks/blob/main/examples/text_classification.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on any GLUE task. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb)| +| [How to fine-tune a model on language modeling](https://github.com/huggingface/notebooks/blob/main/examples/language_modeling.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on a causal or masked LM task. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb)| +| [How to fine-tune a model on token classification](https://github.com/huggingface/notebooks/blob/main/examples/token_classification.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on a token classification task (NER, PoS). | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb)| +| [How to fine-tune a model on question answering](https://github.com/huggingface/notebooks/blob/main/examples/question_answering.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on SQUAD. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb)| +| [How to fine-tune a model on multiple choice](https://github.com/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on SWAG. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb)| +| [How to fine-tune a model on translation](https://github.com/huggingface/notebooks/blob/main/examples/translation.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on WMT. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/translation.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/translation.ipynb)| +| [How to fine-tune a model on summarization](https://github.com/huggingface/notebooks/blob/main/examples/summarization.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on XSUM. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/summarization.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/summarization.ipynb)| +| [How to fine-tune a speech recognition model in English](https://github.com/huggingface/notebooks/blob/main/examples/speech_recognition.ipynb)| Show how to preprocess the data and fine-tune a pretrained Speech model on TIMIT | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/speech_recognition.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/speech_recognition.ipynb)| +| [How to fine-tune a speech recognition model in any language](https://github.com/huggingface/notebooks/blob/main/examples/multi_lingual_speech_recognition.ipynb)| Show how to preprocess the data and fine-tune a multi-lingually pretrained speech model on Common Voice | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multi_lingual_speech_recognition.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/multi_lingual_speech_recognition.ipynb)| +| [How to fine-tune a model on audio classification](https://github.com/huggingface/notebooks/blob/main/examples/audio_classification.ipynb)| Show how to preprocess the data and fine-tune a pretrained Speech model on Keyword Spotting | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/audio_classification.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/audio_classification.ipynb)| +| [How to train a language model from scratch](https://github.com/huggingface/blog/blob/main/notebooks/01_how_to_train.ipynb)| Highlight all the steps to effectively train Transformer model on custom data | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/01_how_to_train.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/blog/blob/main/notebooks/01_how_to_train.ipynb)| +| [How to generate text](https://github.com/huggingface/blog/blob/main/notebooks/02_how_to_generate.ipynb)| How to use different decoding methods for language generation with transformers | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/02_how_to_generate.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/blog/blob/main/notebooks/02_how_to_generate.ipynb)| +| [How to generate text (with constraints)](https://github.com/huggingface/blog/blob/main/notebooks/53_constrained_beam_search.ipynb)| How to guide language generation with user-provided constraints | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/53_constrained_beam_search.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/blog/blob/main/notebooks/53_constrained_beam_search.ipynb)| | [How to export model to ONNX](https://github.com/huggingface/notebooks/blob/main/examples/onnx-export.ipynb)| Highlight how to export and run inference workloads through ONNX | -| [How to use Benchmarks](https://github.com/huggingface/notebooks/blob/main/examples/benchmark.ipynb)| How to benchmark models with transformers | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/benchmark.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/benchmark.ipynb)| -| [Reformer](https://github.com/huggingface/blog/blob/main/notebooks/03_reformer.ipynb)| How Reformer pushes the limits of language modeling | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/patrickvonplaten/blog/blob/main/notebooks/03_reformer.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/patrickvonplaten/blog/blob/main/notebooks/03_reformer.ipynb)| -| [How to fine-tune a model on image classification (Torchvision)](https://github.com/huggingface/notebooks/blob/main/examples/image_classification.ipynb) | Show how to preprocess the data using Torchvision and fine-tune any pretrained Vision model on Image Classification | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb)| -| [How to fine-tune a model on image classification (Albumentations)](https://github.com/huggingface/notebooks/blob/main/examples/image_classification_albumentations.ipynb) | Show how to preprocess the data using Albumentations and fine-tune any pretrained Vision model on Image Classification | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification_albumentations.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/image_classification_albumentations.ipynb)| -| [How to perform zero-shot object detection with OWL-ViT](https://github.com/huggingface/notebooks/blob/main/examples/zeroshot_object_detection_with_owlvit.ipynb) | Show how to perform zero-shot object detection on images with text queries| [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/zeroshot_object_detection_with_owlvit.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/zeroshot_object_detection_with_owlvit.ipynb)| +| [How to use Benchmarks](https://github.com/huggingface/notebooks/blob/main/examples/benchmark.ipynb)| How to benchmark models with transformers | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/benchmark.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/benchmark.ipynb)| +| [Reformer](https://github.com/huggingface/blog/blob/main/notebooks/03_reformer.ipynb)| How Reformer pushes the limits of language modeling | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/patrickvonplaten/blog/blob/main/notebooks/03_reformer.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/patrickvonplaten/blog/blob/main/notebooks/03_reformer.ipynb)| +| [How to fine-tune a model on image classification (Torchvision)](https://github.com/huggingface/notebooks/blob/main/examples/image_classification.ipynb) | Show how to preprocess the data using Torchvision and fine-tune any pretrained Vision model on Image Classification | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb)| +| [How to fine-tune a model on image classification (Albumentations)](https://github.com/huggingface/notebooks/blob/main/examples/image_classification_albumentations.ipynb) | Show how to preprocess the data using Albumentations and fine-tune any pretrained Vision model on Image Classification | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification_albumentations.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/image_classification_albumentations.ipynb)| +| [How to perform zero-shot object detection with OWL-ViT](https://github.com/huggingface/notebooks/blob/main/examples/zeroshot_object_detection_with_owlvit.ipynb) | Show how to perform zero-shot object detection on images with text queries| [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/zeroshot_object_detection_with_owlvit.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/zeroshot_object_detection_with_owlvit.ipynb)| ### TensorFlow Examples | Notebook | Description | | | |:----------|:-------------|:-------------|------:| -| [Train your tokenizer](https://github.com/huggingface/notebooks/blob/main/examples/tokenizer_training.ipynb) | How to train and use your very own tokenizer |[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/tokenizer_training.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/tokenizer_training.ipynb)| -| [Train your language model](https://github.com/huggingface/notebooks/blob/main/examples/language_modeling_from_scratch-tf.ipynb) | How to easily start using transformers |[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling_from_scratch-tf.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/language_modeling_from_scratch-tf.ipynb)| -| [How to fine-tune a model on text classification](https://github.com/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on any GLUE task. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb)| -| [How to fine-tune a model on language modeling](https://github.com/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on a causal or masked LM task. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb)| -| [How to fine-tune a model on token classification](https://github.com/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on a token classification task (NER, PoS). | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb)| -| [How to fine-tune a model on question answering](https://github.com/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on SQUAD. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb)| -| [How to fine-tune a model on multiple choice](https://github.com/huggingface/notebooks/blob/main/examples/multiple_choice-tf.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on SWAG. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice-tf.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/multiple_choice-tf.ipynb)| -| [How to fine-tune a model on translation](https://github.com/huggingface/notebooks/blob/main/examples/translation-tf.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on WMT. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/translation-tf.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/translation-tf.ipynb)| -| [How to fine-tune a model on summarization](https://github.com/huggingface/notebooks/blob/main/examples/summarization-tf.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on XSUM. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/summarization-tf.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/summarization-tf.ipynb)| +| [Train your tokenizer](https://github.com/huggingface/notebooks/blob/main/examples/tokenizer_training.ipynb) | How to train and use your very own tokenizer |[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/tokenizer_training.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/tokenizer_training.ipynb)| +| [Train your language model](https://github.com/huggingface/notebooks/blob/main/examples/language_modeling_from_scratch-tf.ipynb) | How to easily start using transformers |[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling_from_scratch-tf.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/language_modeling_from_scratch-tf.ipynb)| +| [How to fine-tune a model on text classification](https://github.com/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on any GLUE task. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb)| +| [How to fine-tune a model on language modeling](https://github.com/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on a causal or masked LM task. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb)| +| [How to fine-tune a model on token classification](https://github.com/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on a token classification task (NER, PoS). | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb)| +| [How to fine-tune a model on question answering](https://github.com/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on SQUAD. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb)| +| [How to fine-tune a model on multiple choice](https://github.com/huggingface/notebooks/blob/main/examples/multiple_choice-tf.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on SWAG. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice-tf.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/multiple_choice-tf.ipynb)| +| [How to fine-tune a model on translation](https://github.com/huggingface/notebooks/blob/main/examples/translation-tf.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on WMT. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/translation-tf.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/translation-tf.ipynb)| +| [How to fine-tune a model on summarization](https://github.com/huggingface/notebooks/blob/main/examples/summarization-tf.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on XSUM. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/summarization-tf.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/summarization-tf.ipynb)| ### Optimum notebooks @@ -86,7 +86,9 @@ You can open any page of the documentation as a notebook in colab (there is a bu | Notebook | Description | | | |:----------|:-------------|:-------------|------:| | [How to quantize a model with ONNX Runtime for text classification](https://github.com/huggingface/notebooks/blob/main/examples/text_classification_quantization_ort.ipynb)| Show how to apply static and dynamic quantization on a model using [ONNX Runtime](https://github.com/microsoft/onnxruntime) for any GLUE task. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification_quantization_ort.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/text_classification_quantization_ort.ipynb)| -| [How to quantize a model with Intel Neural Compressor for text classification](https://github.com/huggingface/notebooks/blob/main/examples/text_classification_quantization_inc.ipynb)| Show how to apply static, dynamic and aware training quantization on a model using [Intel Neural Compressor (INC)](https://github.com/intel/neural-compressor) for any GLUE task. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification_quantization_inc.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/text_classification_quantization_inc.ipynb)| +| [How to quantize a model with Intel Neural Compressor for text classification](https://github.com/huggingface/notebooks/blob/main/examples/text_classification_quantization_inc.ipynb)| Show how to apply static, dynamic and aware training quantization on a model using [Intel Neural Compressor (INC)](https://github.com/intel/neural-compressor) for any GLUE task. | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification_quantization_inc.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/text_classification_quantization_inc.ipynb)| +| [How to fine-tune a model on text classification with ONNX Runtime](https://github.com/huggingface/notebooks/blob/main/examples/text_classification_ort.ipynb)| Show how to preprocess the data and fine-tune a model on any GLUE task using [ONNX Runtime](https://github.com/microsoft/onnxruntime). | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification_ort.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/text_classification_ort.ipynb)| +| [How to fine-tune a model on summarization with ONNX Runtime](https://github.com/huggingface/notebooks/blob/main/examples/summarization_ort.ipynb)| Show how to preprocess the data and fine-tune a model on XSUM using [ONNX Runtime](https://github.com/microsoft/onnxruntime). | [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/summarization_ort.ipynb)| [![Open in AWS Studio](https://studiolab.sagemaker.aws/studiolab.svg)](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/summarization_ort.ipynb)| ## Community notebooks: From 3e2dd7f92d20f67e34f649cdafbbe28d22a75f44 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Fri, 30 Sep 2022 10:58:04 -0400 Subject: [PATCH 430/539] Poc to use safetensors (#19175) * Poc to use safetensors * Typo * Final version * Add tests * Save with the right name! * Update tests/test_modeling_common.py Co-authored-by: Julien Chaumond * Support for sharded checkpoints * Test from Hub part 1 * Test from hub part 2 * Fix regular checkpoint sharding * Bump for fixes Co-authored-by: Julien Chaumond --- setup.py | 2 + src/transformers/__init__.py | 2 + src/transformers/dependency_versions_table.py | 1 + src/transformers/modeling_utils.py | 84 +++++++++++++++++-- src/transformers/testing_utils.py | 8 ++ src/transformers/utils/__init__.py | 3 + src/transformers/utils/import_utils.py | 4 + tests/test_modeling_common.py | 54 ++++++++++++ 8 files changed, 150 insertions(+), 8 deletions(-) diff --git a/setup.py b/setup.py index f2e533ce98710e..0de934233dd83f 100644 --- a/setup.py +++ b/setup.py @@ -148,6 +148,7 @@ "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1", "sacrebleu>=1.4.12,<2.0.0", "sacremoses", + "safetensors>=0.2.1", "sagemaker>=2.31.0", "scikit-learn", "sentencepiece>=0.1.91,!=0.1.92", @@ -300,6 +301,7 @@ def run(self): "protobuf", # Can be removed once we can unpin protobuf "sacremoses", "rjieba", + "safetensors", ) + extras["retrieval"] + extras["modelcreation"] diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 93c3118f691909..e8c4a8939f45bf 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -478,6 +478,7 @@ "is_psutil_available", "is_py3nvml_available", "is_pyctcdecode_available", + "is_safetensors_available", "is_scipy_available", "is_sentencepiece_available", "is_sklearn_available", @@ -3417,6 +3418,7 @@ is_psutil_available, is_py3nvml_available, is_pyctcdecode_available, + is_safetensors_available, is_scipy_available, is_sentencepiece_available, is_sklearn_available, diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py index fae26de7bbab0a..4b3c79e65bb3ee 100644 --- a/src/transformers/dependency_versions_table.py +++ b/src/transformers/dependency_versions_table.py @@ -54,6 +54,7 @@ "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1", "sacrebleu": "sacrebleu>=1.4.12,<2.0.0", "sacremoses": "sacremoses", + "safetensors": "safetensors>=0.2.1", "sagemaker": "sagemaker>=2.31.0", "scikit-learn": "scikit-learn", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index bb35bf7c80336e..ec876af9e55e1f 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -50,6 +50,8 @@ from .utils import ( DUMMY_INPUTS, FLAX_WEIGHTS_NAME, + SAFE_WEIGHTS_INDEX_NAME, + SAFE_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, @@ -65,6 +67,7 @@ is_bitsandbytes_available, is_offline_mode, is_remote_url, + is_safetensors_available, logging, replace_return_docstrings, ) @@ -86,6 +89,10 @@ else: get_balanced_memory = None +if is_safetensors_available(): + from safetensors import safe_open + from safetensors.torch import load_file as safe_load_file + from safetensors.torch import save_file as safe_save_file logger = logging.get_logger(__name__) @@ -241,7 +248,9 @@ def dtype_byte_size(dtype): return bit_size // 8 -def shard_checkpoint(state_dict: Dict[str, torch.Tensor], max_shard_size: Union[int, str] = "10GB"): +def shard_checkpoint( + state_dict: Dict[str, torch.Tensor], max_shard_size: Union[int, str] = "10GB", weights_name: str = WEIGHTS_NAME +): """ Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a given size. @@ -263,6 +272,8 @@ def shard_checkpoint(state_dict: Dict[str, torch.Tensor], max_shard_size: Union[ max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`): The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`). + weights_name (`str`, *optional*, defaults to `"pytorch_model.bin"`): + The name of the model save file. """ max_shard_size = convert_file_size_to_int(max_shard_size) @@ -289,13 +300,16 @@ def shard_checkpoint(state_dict: Dict[str, torch.Tensor], max_shard_size: Union[ # If we only have one shard, we return it if len(sharded_state_dicts) == 1: - return {WEIGHTS_NAME: sharded_state_dicts[0]}, None + return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index weight_map = {} shards = {} for idx, shard in enumerate(sharded_state_dicts): - shard_file = WEIGHTS_NAME.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin") + shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin") + shard_file = shard_file.replace( + ".safetensors", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.safetensors" + ) shards[shard_file] = shard for key in shard.keys(): weight_map[key] = shard_file @@ -367,6 +381,20 @@ def load_state_dict(checkpoint_file: Union[str, os.PathLike]): """ Reads a PyTorch checkpoint file, returning properly formatted errors if they arise. """ + if checkpoint_file.endswith(".safetensors") and is_safetensors_available(): + # Check format of the archive + with safe_open(checkpoint_file, framework="pt") as f: + metadata = f.metadata() + if metadata.get("format") not in ["pt", "tf", "flax"]: + raise OSError( + f"The safetensors archive passed at {checkpoint_file} does not contain the valid metadata. Make sure " + "you save your model with the `save_pretrained` method." + ) + elif metadata["format"] != "pt": + raise NotImplementedError( + f"Conversion from a {metadata['format']} safetensors archive to PyTorch is not implemented yet." + ) + return safe_load_file(checkpoint_file) try: return torch.load(checkpoint_file, map_location="cpu") except Exception as e: @@ -1468,6 +1496,7 @@ def save_pretrained( save_function: Callable = torch.save, push_to_hub: bool = False, max_shard_size: Union[int, str] = "10GB", + safe_serialization: bool = False, **kwargs, ): """ @@ -1503,6 +1532,9 @@ def save_pretrained(
+ safe_serialization (`bool`, *optional*, defaults to `False`): + Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). + kwargs: Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. """ @@ -1511,6 +1543,8 @@ def save_pretrained( "`save_config` is deprecated and will be removed in v5 of Transformers. Use `is_main_process` instead." ) is_main_process = kwargs.pop("save_config") + if safe_serialization and not is_safetensors_available(): + raise ImportError("`safe_serialization` requires the `safetensors library: `pip install safetensors`.") if os.path.isfile(save_directory): logger.error(f"Provided path ({save_directory}) should be a directory, not a file") @@ -1560,15 +1594,17 @@ def save_pretrained( del state_dict[ignore_key] # Shard the model if it is too big. - shards, index = shard_checkpoint(state_dict, max_shard_size=max_shard_size) + weights_name = SAFE_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME + shards, index = shard_checkpoint(state_dict, max_shard_size=max_shard_size, weights_name=weights_name) # Clean the folder from a previous save for filename in os.listdir(save_directory): full_filename = os.path.join(save_directory, filename) # If we have a shard file that is not going to be replaced, we delete it, but only from the main process # in distributed settings to avoid race conditions. + weights_no_suffix = weights_name.replace(".bin", "").replace(".safetensors", "") if ( - filename.startswith(WEIGHTS_NAME[:-4]) + filename.startswith(weights_no_suffix) and os.path.isfile(full_filename) and filename not in shards.keys() and is_main_process @@ -1577,12 +1613,18 @@ def save_pretrained( # Save the model for shard_file, shard in shards.items(): - save_function(shard, os.path.join(save_directory, shard_file)) + if safe_serialization: + # At some point we will need to deal better with save_function (used for TPU and other distributed + # joyfulness), but for now this enough. + safe_save_file(shard, os.path.join(save_directory, shard_file), metadata={"format": "pt"}) + else: + save_function(shard, os.path.join(save_directory, shard_file)) if index is None: logger.info(f"Model weights saved in {os.path.join(save_directory, WEIGHTS_NAME)}") else: - save_index_file = os.path.join(save_directory, WEIGHTS_INDEX_NAME) + save_index_file = SAFE_WEIGHTS_INDEX_NAME if safe_serialization else WEIGHTS_INDEX_NAME + save_index_file = os.path.join(save_directory, save_index_file) # Save the index as well with open(save_index_file, "w", encoding="utf-8") as f: content = json.dumps(index, indent=2, sort_keys=True) + "\n" @@ -1966,6 +2008,17 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P ): # Load from a Flax checkpoint in priority if from_flax archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME) + elif is_safetensors_available() and os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, SAFE_WEIGHTS_NAME) + ): + # Load from a safetensors checkpoint + archive_file = os.path.join(pretrained_model_name_or_path, subfolder, SAFE_WEIGHTS_NAME) + elif is_safetensors_available() and os.path.isfile( + os.path.join(pretrained_model_name_or_path, subfolder, SAFE_WEIGHTS_INDEX_NAME) + ): + # Load from a sharded safetensors checkpoint + archive_file = os.path.join(pretrained_model_name_or_path, subfolder, SAFE_WEIGHTS_INDEX_NAME) + is_sharded = True elif os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_NAME)): # Load from a PyTorch checkpoint archive_file = os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_NAME) @@ -2013,6 +2066,8 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P filename = TF2_WEIGHTS_NAME elif from_flax: filename = FLAX_WEIGHTS_NAME + elif is_safetensors_available(): + filename = SAFE_WEIGHTS_NAME else: filename = WEIGHTS_NAME @@ -2033,8 +2088,21 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P ) resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs) - # Since we set _raise_exceptions_for_missing_entries=False, we don't get an expection but a None + # Since we set _raise_exceptions_for_missing_entries=False, we don't get an exception but a None # result when internet is up, the repo and revision exist, but the file does not. + if resolved_archive_file is None and filename == SAFE_WEIGHTS_NAME: + # Maybe the checkpoint is sharded, we try to grab the index name in this case. + resolved_archive_file = cached_file( + pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME, **cached_file_kwargs + ) + if resolved_archive_file is not None: + is_sharded = True + else: + # This repo has no safetensors file of any kind, we switch to PyTorch. + filename = WEIGHTS_NAME + resolved_archive_file = cached_file( + pretrained_model_name_or_path, WEIGHTS_NAME, **cached_file_kwargs + ) if resolved_archive_file is None and filename == WEIGHTS_NAME: # Maybe the checkpoint is sharded, we try to grab the index name in this case. resolved_archive_file = cached_file( diff --git a/src/transformers/testing_utils.py b/src/transformers/testing_utils.py index 65c15fbd967ba0..7e3242e94c945c 100644 --- a/src/transformers/testing_utils.py +++ b/src/transformers/testing_utils.py @@ -60,6 +60,7 @@ is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, + is_safetensors_available, is_scatter_available, is_scipy_available, is_sentencepiece_available, @@ -264,6 +265,13 @@ def require_accelerate(test_case): return unittest.skipUnless(is_accelerate_available(), "test requires accelerate")(test_case) +def require_safetensors(test_case): + """ + Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed. + """ + return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case) + + def require_rjieba(test_case): """ Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed. diff --git a/src/transformers/utils/__init__.py b/src/transformers/utils/__init__.py index 7f3f704ac4a38b..fdd1c376dabdbf 100644 --- a/src/transformers/utils/__init__.py +++ b/src/transformers/utils/__init__.py @@ -111,6 +111,7 @@ is_pytorch_quantization_available, is_rjieba_available, is_sacremoses_available, + is_safetensors_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scatter_available, @@ -156,6 +157,8 @@ TF_WEIGHTS_NAME = "model.ckpt" FLAX_WEIGHTS_NAME = "flax_model.msgpack" FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json" +SAFE_WEIGHTS_NAME = "model.safetensors" +SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json" CONFIG_NAME = "config.json" FEATURE_EXTRACTOR_NAME = "preprocessor_config.json" MODEL_CARD_NAME = "modelcard.json" diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index 16616e0772dd97..81b7c478c1b2aa 100644 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -533,6 +533,10 @@ def is_accelerate_available(): return importlib.util.find_spec("accelerate") is not None +def is_safetensors_available(): + return importlib.util.find_spec("safetensors") is not None + + def is_tokenizers_available(): return importlib.util.find_spec("tokenizers") is not None diff --git a/tests/test_modeling_common.py b/tests/test_modeling_common.py index 42ecad03c6aee9..0a55f1d11c35f5 100755 --- a/tests/test_modeling_common.py +++ b/tests/test_modeling_common.py @@ -53,6 +53,7 @@ is_pt_tf_cross_test, is_staging_test, require_accelerate, + require_safetensors, require_torch, require_torch_gpu, require_torch_multi_gpu, @@ -61,6 +62,8 @@ torch_device, ) from transformers.utils import ( + SAFE_WEIGHTS_INDEX_NAME, + SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, is_accelerate_available, @@ -2980,6 +2983,57 @@ def test_legacy_load_from_url(self): "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/pytorch_model.bin", config=config ) + @require_safetensors + def test_safetensors_save_and_load(self): + model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") + with tempfile.TemporaryDirectory() as tmp_dir: + model.save_pretrained(tmp_dir, safe_serialization=True) + # No pytorch_model.bin file, only a model.safetensors + self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) + self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) + + new_model = BertModel.from_pretrained(tmp_dir) + + # Check models are equal + for p1, p2 in zip(model.parameters(), new_model.parameters()): + self.assertTrue(torch.allclose(p1, p2)) + + @require_safetensors + def test_safetensors_load_from_hub(self): + safetensors_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-safetensors") + pytorch_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") + + # Check models are equal + for p1, p2 in zip(safetensors_model.parameters(), pytorch_model.parameters()): + self.assertTrue(torch.allclose(p1, p2)) + + @require_safetensors + def test_safetensors_save_and_load_sharded(self): + model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert") + with tempfile.TemporaryDirectory() as tmp_dir: + model.save_pretrained(tmp_dir, safe_serialization=True, max_shard_size="100kB") + # No pytorch_model.bin index file, only a model.safetensors index + self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_INDEX_NAME))) + self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))) + # No regular weights file + self.assertFalse(os.path.isfile(os.path.join(tmp_dir, WEIGHTS_NAME))) + self.assertFalse(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME))) + + new_model = BertModel.from_pretrained(tmp_dir) + + # Check models are equal + for p1, p2 in zip(model.parameters(), new_model.parameters()): + self.assertTrue(torch.allclose(p1, p2)) + + @require_safetensors + def test_safetensors_load_from_hub_sharded(self): + safetensors_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded-safetensors") + pytorch_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded") + + # Check models are equal + for p1, p2 in zip(safetensors_model.parameters(), pytorch_model.parameters()): + self.assertTrue(torch.allclose(p1, p2)) + @require_torch @is_staging_test From 2fba98e5859d6d8dc520ec466891f1eef630c094 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Fri, 30 Sep 2022 18:14:01 +0200 Subject: [PATCH 431/539] Add `beautifulsoup4` to the dependency list (#19253) * Add `beautifulsoup4` to extras["testing"] Co-authored-by: ydshieh --- setup.py | 2 ++ src/transformers/dependency_versions_table.py | 1 + 2 files changed, 3 insertions(+) diff --git a/setup.py b/setup.py index 0de934233dd83f..9cb00068ac3280 100644 --- a/setup.py +++ b/setup.py @@ -169,6 +169,7 @@ "unidic>=1.0.2", "unidic_lite>=1.0.7", "uvicorn", + "beautifulsoup4", ] @@ -302,6 +303,7 @@ def run(self): "sacremoses", "rjieba", "safetensors", + "beautifulsoup4", ) + extras["retrieval"] + extras["modelcreation"] diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py index 4b3c79e65bb3ee..89522d9c6ac4ad 100644 --- a/src/transformers/dependency_versions_table.py +++ b/src/transformers/dependency_versions_table.py @@ -75,4 +75,5 @@ "unidic": "unidic>=1.0.2", "unidic_lite": "unidic_lite>=1.0.7", "uvicorn": "uvicorn", + "beautifulsoup4": "beautifulsoup4", } From f33858d18a0a218adee56ae08fb57199e6dca64a Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Fri, 30 Sep 2022 18:15:07 +0200 Subject: [PATCH 432/539] Fix Encoder-Decoder testing issue about repo. names (#19250) * Change "../gpt2" to "gpt2" Co-authored-by: ydshieh --- .../test_modeling_encoder_decoder.py | 4 ++-- .../test_modeling_tf_encoder_decoder.py | 2 +- .../test_modeling_tf_vision_encoder_decoder.py | 18 +++++++----------- 3 files changed, 10 insertions(+), 14 deletions(-) diff --git a/tests/models/encoder_decoder/test_modeling_encoder_decoder.py b/tests/models/encoder_decoder/test_modeling_encoder_decoder.py index 6980ed6cb26e23..32cae5066669b6 100644 --- a/tests/models/encoder_decoder/test_modeling_encoder_decoder.py +++ b/tests/models/encoder_decoder/test_modeling_encoder_decoder.py @@ -919,7 +919,7 @@ def prepare_config_and_inputs(self): } def get_pretrained_model(self): - return EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-cased", "../gpt2") + return EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-cased", "gpt2") def test_encoder_decoder_model_shared_weights(self): pass @@ -930,7 +930,7 @@ def test_bert2gpt2_summarization(self): model.to(torch_device) tokenizer_in = AutoTokenizer.from_pretrained("bert-base-cased") - tokenizer_out = AutoTokenizer.from_pretrained("../gpt2") + tokenizer_out = AutoTokenizer.from_pretrained("gpt2") ARTICLE_STUDENTS = """(CNN)Sigma Alpha Epsilon is under fire for a video showing party-bound fraternity members singing a racist chant. SAE's national chapter suspended the students, but University of Oklahoma President David Boren took it a step further, saying the university's affiliation with the fraternity is permanently done. The news is shocking, but it's not the first time SAE has faced controversy. SAE was founded March 9, 1856, at the University of Alabama, five years before the American Civil War, according to the fraternity website. When the war began, the group had fewer than 400 members, of which "369 went to war for the Confederate States and seven for the Union Army," the website says. The fraternity now boasts more than 200,000 living alumni, along with about 15,000 undergraduates populating 219 chapters and 20 "colonies" seeking full membership at universities. SAE has had to work hard to change recently after a string of member deaths, many blamed on the hazing of new recruits, SAE national President Bradley Cohen wrote in a message on the fraternity's website. The fraternity's website lists more than 130 chapters cited or suspended for "health and safety incidents" since 2010. At least 30 of the incidents involved hazing, and dozens more involved alcohol. However, the list is missing numerous incidents from recent months. Among them, according to various media outlets: Yale University banned the SAEs from campus activities last month after members allegedly tried to interfere with a sexual misconduct investigation connected to an initiation rite. Stanford University in December suspended SAE housing privileges after finding sorority members attending a fraternity function were subjected to graphic sexual content. And Johns Hopkins University in November suspended the fraternity for underage drinking. "The media has labeled us as the 'nation's deadliest fraternity,' " Cohen said. In 2011, for example, a student died while being coerced into excessive alcohol consumption, according to a lawsuit. SAE's previous insurer dumped the fraternity. "As a result, we are paying Lloyd's of London the highest insurance rates in the Greek-letter world," Cohen said. Universities have turned down SAE's attempts to open new chapters, and the fraternity had to close 12 in 18 months over hazing incidents.""" diff --git a/tests/models/encoder_decoder/test_modeling_tf_encoder_decoder.py b/tests/models/encoder_decoder/test_modeling_tf_encoder_decoder.py index d179d5f9d517ea..6637e99f33096a 100644 --- a/tests/models/encoder_decoder/test_modeling_tf_encoder_decoder.py +++ b/tests/models/encoder_decoder/test_modeling_tf_encoder_decoder.py @@ -908,7 +908,7 @@ def test_bert2gpt2_summarization(self): from transformers import EncoderDecoderModel tokenizer_in = AutoTokenizer.from_pretrained("bert-base-cased") - tokenizer_out = AutoTokenizer.from_pretrained("../gpt2") + tokenizer_out = AutoTokenizer.from_pretrained("gpt2") """Not working, because pt checkpoint has `encoder.encoder.layer...` while tf model has `encoder.bert.encoder.layer...`. (For GPT2 decoder, there is no issue) diff --git a/tests/models/vision_encoder_decoder/test_modeling_tf_vision_encoder_decoder.py b/tests/models/vision_encoder_decoder/test_modeling_tf_vision_encoder_decoder.py index 97ac813905300c..16d42229e369a2 100644 --- a/tests/models/vision_encoder_decoder/test_modeling_tf_vision_encoder_decoder.py +++ b/tests/models/vision_encoder_decoder/test_modeling_tf_vision_encoder_decoder.py @@ -673,9 +673,7 @@ def test_real_model_save_load_from_pretrained(self): @require_tf class TFViT2GPT2EncoderDecoderModelTest(TFVisionEncoderDecoderMixin, unittest.TestCase): def get_pretrained_model(self): - return TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained( - "google/vit-base-patch16-224-in21k", "../gpt2" - ) + return TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained("google/vit-base-patch16-224-in21k", "gpt2") def get_encoder_decoder_model(self, config, decoder_config): encoder_model = TFViTModel(config, name="encoder") @@ -720,12 +718,10 @@ def prepare_config_and_inputs(self): @require_tf class TFVisionEncoderDecoderModelTest(unittest.TestCase): def get_from_encoderdecoder_pretrained_model(self): - return TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained( - "google/vit-base-patch16-224-in21k", "../gpt2" - ) + return TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained("google/vit-base-patch16-224-in21k", "gpt2") def get_decoder_config(self): - config = AutoConfig.from_pretrained("../gpt2") + config = AutoConfig.from_pretrained("gpt2") config.is_decoder = True config.add_cross_attention = True return config @@ -735,7 +731,7 @@ def get_encoderdecoder_model(self): def get_encoder_decoder_models(self): encoder_model = TFViTModel.from_pretrained("google/vit-base-patch16-224-in21k", name="encoder") - decoder_model = TFGPT2LMHeadModel.from_pretrained("../gpt2", config=self.get_decoder_config(), name="decoder") + decoder_model = TFGPT2LMHeadModel.from_pretrained("gpt2", config=self.get_decoder_config(), name="decoder") return {"encoder": encoder_model, "decoder": decoder_model} def _check_configuration_tie(self, model): @@ -764,7 +760,7 @@ def prepare_img(): class TFVisionEncoderDecoderModelSaveLoadTests(unittest.TestCase): def get_encoder_decoder_config(self): encoder_config = AutoConfig.from_pretrained("google/vit-base-patch16-224-in21k") - decoder_config = AutoConfig.from_pretrained("../gpt2", is_decoder=True, add_cross_attention=True) + decoder_config = AutoConfig.from_pretrained("gpt2", is_decoder=True, add_cross_attention=True) return VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config) def get_encoder_decoder_config_small(self): @@ -879,7 +875,7 @@ def test_encoder_decoder_from_pretrained(self): config = self.get_encoder_decoder_config() feature_extractor = AutoFeatureExtractor.from_pretrained("google/vit-base-patch16-224-in21k") - decoder_tokenizer = AutoTokenizer.from_pretrained("../gpt2") + decoder_tokenizer = AutoTokenizer.from_pretrained("gpt2") img = prepare_img() pixel_values = feature_extractor(images=img, return_tensors="tf").pixel_values @@ -896,7 +892,7 @@ def test_encoder_decoder_from_pretrained(self): encoder = TFAutoModel.from_pretrained("google/vit-base-patch16-224-in21k", name="encoder") # It's necessary to specify `add_cross_attention=True` here. decoder = TFAutoModelForCausalLM.from_pretrained( - "../gpt2", is_decoder=True, add_cross_attention=True, name="decoder" + "gpt2", is_decoder=True, add_cross_attention=True, name="decoder" ) pretrained_encoder_dir = os.path.join(tmp_dirname, "pretrained_encoder") pretrained_decoder_dir = os.path.join(tmp_dirname, "pretrained_decoder") From 6a08162ad4ef2e08de45301cc9197f2bee01cbef Mon Sep 17 00:00:00 2001 From: Keith Kjer Date: Fri, 30 Sep 2022 12:13:39 -0700 Subject: [PATCH 433/539] Fix cached lookup filepath on windows for hub (#19178) * Update hub.py commit_hash extraction Add safety mechanism for windows systems to unify logic (replace double backslashes with /) * Fix string quotetype * Aaaa circleci is messing with me. * Switch to using as_posix() method from pathlib * Update src/transformers/utils/hub.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/utils/hub.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> --- src/transformers/utils/hub.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py index 2d24b233f8d015..a6ef0c86cdcb7a 100644 --- a/src/transformers/utils/hub.py +++ b/src/transformers/utils/hub.py @@ -222,7 +222,7 @@ def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] """ if resolved_file is None or commit_hash is not None: return commit_hash - + resolved_file = str(Path(resolved_file).as_posix()) search = re.search(r"snapshots/([^/]+)/", resolved_file) if search is None: return None From cfb777f27ca5376f4f9d2723efa18604e82747b5 Mon Sep 17 00:00:00 2001 From: Joao Gante Date: Fri, 30 Sep 2022 20:30:38 +0100 Subject: [PATCH 434/539] Docs - Guide to add a new TensorFlow model (#19256) Co-authored-by: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Co-authored-by: Matt --- docs/source/en/_toctree.yml | 6 +- docs/source/en/add_new_model.mdx | 14 +- docs/source/en/add_tensorflow_model.mdx | 346 ++++++++++++++++++ .../en/converting_tensorflow_models.mdx | 2 +- 4 files changed, 359 insertions(+), 9 deletions(-) create mode 100644 docs/source/en/add_tensorflow_model.mdx diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 6e0f764465c73e..08f3320047ed09 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -31,7 +31,7 @@ - local: sagemaker title: Run training on Amazon SageMaker - local: converting_tensorflow_models - title: Converting TensorFlow Checkpoints + title: Converting from TensorFlow checkpoints - local: serialization title: Export 🤗 Transformers models - local: troubleshooting @@ -109,6 +109,8 @@ title: How to contribute to transformers? - local: add_new_model title: How to add a model to 🤗 Transformers? + - local: add_tensorflow_model + title: How to convert a 🤗 Transformers model to TensorFlow? - local: add_new_pipeline title: How to add a pipeline to 🤗 Transformers? - local: testing @@ -511,4 +513,4 @@ - local: internal/file_utils title: General Utilities title: Internal Helpers - title: API \ No newline at end of file + title: API diff --git a/docs/source/en/add_new_model.mdx b/docs/source/en/add_new_model.mdx index 799bf8074e7d1c..8f110121d7ce0b 100644 --- a/docs/source/en/add_new_model.mdx +++ b/docs/source/en/add_new_model.mdx @@ -106,7 +106,7 @@ own regarding how code should be written :-) for a good example). 2. The code should be fully understandable, even by a non-native English speaker. This means you should pick descriptive variable names and avoid abbreviations. As an example, `activation` is preferred to `act`. - One-letter variable names are strongly discouraged unless it's an index in a for loop. + One-letter variable names are strongly discouraged unless it's an index in a for loop. 3. More generally we prefer longer explicit code to short magical one. 4. Avoid subclassing `nn.Sequential` in PyTorch but subclass `nn.Module` and write the forward pass, so that anyone using your code can quickly debug it by adding print statements or breaking points. @@ -222,7 +222,7 @@ cd .. 5. To port *brand_new_bert*, you will also need access to its original repository: ```bash -git clone https://github.com/org_that_created_brand_new_bert_org/brand_new_bert.git +git clone https://github.com/org_that_created_brand_new_bert_org/brand_new_bert.git cd brand_new_bert pip install -e . ``` @@ -683,10 +683,11 @@ work left to be done should be a cakewalk 😊. At this point, you have successfully added a new model. However, it is very much possible that the model does not yet fully comply with the required design. To make sure, the implementation is fully compatible with 🤗 Transformers, all common tests should pass. The Cookiecutter should have automatically added a test file for your model, probably under -the same `tests/test_modeling_brand_new_bert.py`. Run this test file to verify that all common tests pass: +the same `tests/models/brand_new_bert/test_modeling_brand_new_bert.py`. Run this test file to verify that all common +tests pass: ```bash -pytest tests/test_modeling_brand_new_bert.py +pytest tests/models/brand_new_bert/test_modeling_brand_new_bert.py ``` Having fixed all common tests, it is now crucial to ensure that all the nice work you have done is well tested, so that @@ -700,7 +701,7 @@ Cookiecutter, called `BrandNewBertModelIntegrationTests` and only has to be fill tests are passing, run ```bash -RUN_SLOW=1 pytest -sv tests/test_modeling_brand_new_bert.py::BrandNewBertModelIntegrationTests +RUN_SLOW=1 pytest -sv tests/models/brand_new_bert/test_modeling_brand_new_bert.py::BrandNewBertModelIntegrationTests ``` @@ -758,7 +759,8 @@ contain a couple of hard-coded integration tests. **10. Run End-to-end integration tests** Having added the tokenizer, you should also add a couple of end-to-end integration tests using both the model and the -tokenizer to `tests/test_modeling_brand_new_bert.py` in 🤗 Transformers. Such a test should show on a meaningful +tokenizer to `tests/models/brand_new_bert/test_modeling_brand_new_bert.py` in 🤗 Transformers. +Such a test should show on a meaningful text-to-text sample that the 🤗 Transformers implementation works as expected. A meaningful text-to-text sample can include *e.g.* a source-to-target-translation pair, an article-to-summary pair, a question-to-answer pair, etc… If none of the ported checkpoints has been fine-tuned on a downstream task it is enough to simply rely on the model tests. In a diff --git a/docs/source/en/add_tensorflow_model.mdx b/docs/source/en/add_tensorflow_model.mdx new file mode 100644 index 00000000000000..756af3d44e8567 --- /dev/null +++ b/docs/source/en/add_tensorflow_model.mdx @@ -0,0 +1,346 @@ + + +# How to convert a 🤗 Transformers model to TensorFlow? + +Having multiple frameworks available to use with 🤗 Transformers gives you flexibility to play their strengths when +designing your application, but it implies that compatibility must be added on a per-model basis. The good news is that +adding TensorFlow compatibility to an existing model is simpler than [adding a new model from scratch](add_new_model)! +Whether you wish to have a deeper understanding of large TensorFlow models, make a major open-source contribution, or +enable TensorFlow for your model of choice, this guide is for you. + +This guide empowers you, a member of our community, to contribute TensorFlow model weights and/or +architectures to be used in 🤗 Transformers, with minimal supervision from the Hugging Face team. Writing a new model +is no small feat, but hopefully this guide will make it less of a rollercoaster 🎢 and more of a walk in the park 🚶. +Harnessing our collective experiences is absolutely critical to make this process increasingly easier, and thus we +highly encourage that you suggest improvements to this guide! + +Before you dive deeper, it is recommended that you check the following resources if you're new to 🤗 Transformers: +- [General overview of 🤗 Transformers](add_new_model#general-overview-of-transformers) +- [Hugging Face's TensorFlow Philosophy](https://huggingface.co/blog/tensorflow-philosophy) + +In the remainder of this guide, you will learn what's needed to add a new TensorFlow model architecture, the +procedure to convert PyTorch into TensorFlow model weights, and how to efficiently debug mismatches across ML +frameworks. Let's get started! + + + +Are you unsure whether the model you wish to use already has a corresponding TensorFlow architecture? + +  + +Check the `model_type` field of the `config.json` of your model of choice +([example](https://huggingface.co/bert-base-uncased/blob/main/config.json#L14)). If the corresponding model folder in +🤗 Transformers has a file whose name starts with "modeling_tf", it means that it has a corresponding TensorFlow +architecture ([example](https://github.com/huggingface/transformers/tree/main/src/transformers/models/bert)). + + + + +## Step-by-step guide to add TensorFlow model architecture code + +There are many ways to design a large model architecture, and multiple ways of implementing said design. However, +you might recall from our [general overview of 🤗 Transformers](add_new_model#general-overview-of-transformers) +that we are an opinionated bunch - the ease of use of 🤗 Transformers relies on consistent design choices. From +experience, we can tell you a few important things about adding TensorFlow models: + +- Don't reinvent the wheel! More often that not, there are at least two reference implementations you should check: the +PyTorch equivalent of the model you are implementing and other TensorFlow models for the same class of problems. +- Great model implementations survive the test of time. This doesn't happen because the code is pretty, but rather +because the code is clear, easy to debug and build upon. If you make the life of the maintainers easy with your +TensorFlow implementation, by replicating the same patterns as in other TensorFlow models and minimizing the mismatch +to the PyTorch implementation, you ensure your contribution will be long lived. +- Ask for help when you're stuck! The 🤗 Transformers team is here to help, and we've probably found solutions to the same +problems you're facing. + +Here's an overview of the steps needed to add a TensorFlow model architecture: +1. Select the model you wish to convert +2. Prepare transformers dev environment +3. (Optional) Understand theoretical aspects and the existing implementation +4. Implement the model architecture +5. Implement model tests +6. Submit the pull request +7. (Optional) Build demos and share with the world + +### 1.-3. Prepare your model contribution + +**1. Select the model you wish to convert** + +Let's start off with the basics: the first thing you need to know is the architecture you want to convert. If you +don't have your eyes set on a specific architecture, asking the 🤗 Transformers team for suggestions is a great way to +maximize your impact - we will guide you towards the most prominent architectures that are missing on the TensorFlow +side. If the specific model you want to use with TensorFlow already has a TensorFlow architecture implementation in +🤗 Transformers but is lacking weights, feel free to jump straight into the +[weight conversion section](#adding-tensorflow-weights-to-hub) +of this page. + +For simplicity, the remainder of this guide assumes you've decided to contribute with the TensorFlow version of +*BrandNewBert* (the same example as in the [guide](add_new_model) to add a new model from scratch). + + + +Before starting the work on a TensorFlow model architecture, double-check that there is no ongoing effort to do so. +You can search for `BrandNewBert` on the +[pull request GitHub page](https://github.com/huggingface/transformers/pulls?q=is%3Apr) to confirm that there is no +TensorFlow-related pull request. + + + + +**2. Prepare transformers dev environment** + +Having selected the model architecture, open an draft PR to signal your intention to work on it. Follow the +instructions below to set up your environment and open a draft PR. + +1. Fork the [repository](https://github.com/huggingface/transformers) by clicking on the 'Fork' button on the + repository's page. This creates a copy of the code under your GitHub user account. + +2. Clone your `transformers` fork to your local disk, and add the base repository as a remote: + +```bash +git clone https://github.com/[your Github handle]/transformers.git +cd transformers +git remote add upstream https://github.com/huggingface/transformers.git +``` + +3. Set up a development environment, for instance by running the following command: + +```bash +python -m venv .env +source .env/bin/activate +pip install -e ".[dev]" +``` + +**Note:** You don't need to have CUDA installed. Making the new model work on CPU is sufficient. + +4. Create a branch with a descriptive name from your main branch + +```bash +git checkout -b add_tf_brand_new_bert +``` + +5. Fetch and rebase to current main + +```bash +git fetch upstream +git rebase upstream/main +``` + +6. Add an empty `.py` file in `transformers/src/models/brandnewbert/` named `modeling_tf_brandnewbert.py`. This will +be your TensorFlow model file. + +7. Push the changes to your account using: + +```bash +git add . +git commit -m "initial commit" +git push -u origin add_tf_brand_new_bert +``` + +8. Once you are satisfied, go to the webpage of your fork on GitHub. Click on “Pull request”. Make sure to add the + GitHub handle of some members of the Hugging Face team as reviewers, so that the Hugging Face team gets notified for + future changes. + +9. Change the PR into a draft by clicking on “Convert to draft” on the right of the GitHub pull request web page. + + +Now you have set up a development environment to port *BrandNewBert* to TensorFlow in 🤗 Transformers. + + +**3. (Optional) Understand theoretical aspects and the existing implementation** + +You should take some time to read *BrandNewBert's* paper, if such descriptive work exists. There might be large +sections of the paper that are difficult to understand. If this is the case, this is fine - don't worry! The goal is +not to get a deep theoretical understanding of the paper, but to extract the necessary information required to +effectively re-implement the model in 🤗 Transformers using TensorFlow. That being said, you don't have to spend too +much time on the theoretical aspects, but rather focus on the practical ones, namely the existing model documentation +page (e.g. [model docs for BERT](model_doc/bert)). + +After you've grasped the basics of the models you are about to implement, it's important to understand the existing +implementation. This is a great chance to confirm that a working implementation matches your expectations for the +model, as well as to foresee technical challenges on the TensorFlow side. + +It's perfectly natural that you feel overwhelmed with the amount of information that you've just absorbed. It is +definitely not a requirement that you understand all facets of the model at this stage. Nevertheless, we highly +encourage you to clear any pressing questions in our [forum](https://discuss.huggingface.co/). + + +### 4. Model implementation + +Now it's time to finally start coding. Our suggested starting point is the PyTorch file itself: copy the contents of +`modeling_brand_new_bert.py` inside `src/transformers/models/brand_new_bert/` into +`modeling_tf_brand_new_bert.py`. The goal of this section is to modify the file and update the import structure of +🤗 Transformers such that you can import `TFBrandNewBert` and +`TFBrandNewBert.from_pretrained(model_repo, from_pt=True)` sucessfully loads a working TensorFlow *BrandNewBert* model. + +Sadly, there is no prescription to convert a PyTorch model into TensorFlow. You can, however, follow our selection of +tips to make the process as smooth as possible: +- Prepend `TF` to the name of all classes (e.g. `BrandNewBert` becomes `TFBrandNewBert`). +- Most PyTorch operations have a direct TensorFlow replacement. For example, `torch.nn.Linear` corresponds to + `tf.keras.layers.Dense`, `torch.nn.Dropout` corresponds to `tf.keras.layers.Dropout`, etc. If you're not sure + about a specific operation, you can use the [TensorFlow documentation](https://www.tensorflow.org/api_docs/python/tf) + or the [PyTorch documentation](https://pytorch.org/docs/stable/). +- Look for patterns in the 🤗 Transformers codebase. If you come across a certain operation that doesn't have a direct + replacement, the odds are that someone else already had the same problem. +- By default, keep the same variable names and structure as in PyTorch. This will make it easier to debug, track + issues, and add fixes down the line. +- Some layers have different default values in each framework. A notable example is the batch normalization layer's + epsilon (`1e-5` in [PyTorch](https://pytorch.org/docs/stable/generated/torch.nn.BatchNorm2d.html#torch.nn.BatchNorm2d) + and `1e-3` in [TensorFlow](https://www.tensorflow.org/api_docs/python/tf/keras/layers/BatchNormalization)). + Double-check the documentation! +- PyTorch's `nn.Parameter` variables typically need to be initialized within TF Layer's `build()`. See the following + example: [PyTorch](https://github.com/huggingface/transformers/blob/655f72a6896c0533b1bdee519ed65a059c2425ac/src/transformers/models/vit_mae/modeling_vit_mae.py#L212) / + [TensorFlow](https://github.com/huggingface/transformers/blob/655f72a6896c0533b1bdee519ed65a059c2425ac/src/transformers/models/vit_mae/modeling_tf_vit_mae.py#L220) +- If the PyTorch model has a `#copied from ...` on top of a function, the odds are that your TensorFlow model can also + borrow that function from the architecture it was copied from, assuming it has a TensorFlow architecture. +- Assigning the `name` attribute correctly in TensorFlow functions is critical to do the `from_pt=True` weight + cross-loading. `name` is almost always the name of the corresponding variable in the PyTorch code. If `name` is not + properly set, you will see it in the error message when loading the model weights. +- The logic of the base model class, `BrandNewBertModel`, will actually reside in `TFBrandNewBertMainLayer`, a Keras + layer subclass ([example](https://github.com/huggingface/transformers/blob/4fd32a1f499e45f009c2c0dea4d81c321cba7e02/src/transformers/models/bert/modeling_tf_bert.py#L719)). + `TFBrandNewBertModel` will simply be a wrapper around this layer. +- Keras models need to be built in order to load pretrained weights. For that reason, `TFBrandNewBertPreTrainedModel` + will need to hold an example of inputs to the model, the `dummy_inputs` + ([example](https://github.com/huggingface/transformers/blob/4fd32a1f499e45f009c2c0dea4d81c321cba7e02/src/transformers/models/bert/modeling_tf_bert.py#L916)). +- If you get stuck, ask for help - we're here to help you! 🤗 + +In addition to the model file itself, you will also need to add the pointers to the model classes and related +documentation pages. You can complete this part entirely following the patterns in other PRs +([example](https://github.com/huggingface/transformers/pull/18020/files)). Here's a list of the needed manual +changes: +- Include all public classes of *BrandNewBert* in `src/transformers/__init__.py` +- Add *BrandNewBert* classes to the corresponing Auto classes in `src/transformers/models/auto/modeling_tf_auto.py` +- Include the modeling file in the documentation test file list in `utils/documentation_tests.txt` +- Add the lazy loading classes related to *BrandNewBert* in `src/transformers/utils/dummy_tf_objects.py` +- Update the import structures for the public classes in `src/transformers/models/brand_new_bert/__init__.py` +- Add the documentation pointers to the public methods of *BrandNewBert* in `docs/source/en/model_doc/brand_new_bert.mdx` +- Add yourself to the list of contributors to *BrandNewBert* in `docs/source/en/model_doc/brand_new_bert.mdx` +- Finally, add a green tick ✅ to the TensorFlow column of *BrandNewBert* in `docs/source/en/index.mdx` + +When you're happy with your implementation, run the following checklist to confirm that your model architecture is +ready: +1. All layers that behave differently at train time (e.g. Dropout) are called with a `training` argument, which is +propagated all the way from the top-level classes +2. You have used `#copied from ...` whenever possible +3. `TFBrandNewBertMainLayer` and all classes that use it have their `call` function decorated with `@unpack_inputs` +4. `TFBrandNewBertMainLayer` is decorated with `@keras_serializable` +5. A TensorFlow model can be loaded from PyTorch weights using `TFBrandNewBert.from_pretrained(model_repo, from_pt=True)` +6. You can call the TensorFlow model using the expected input format + + +### 5. Add model tests + +Hurray, you've implemented a TensorFlow model! Now it's time to add tests to make sure that your model behaves as +expected. As in the previous section, we suggest you start by copying the `test_modeling_brand_new_bert.py` file in +`tests/models/brand_new_bert/` into `test_modeling_tf_brand_new_bert.py`, and continue by making the necessary +TensorFlow replacements. For now, in all `.from_pretrained()` calls, you should use the `from_pt=True` flag to load +the existing PyTorch weights. + +After you're done, it's time for the moment of truth: run the tests! 😬 + +```bash +NVIDIA_TF32_OVERRIDE=0 RUN_SLOW=1 RUN_PT_TF_CROSS_TESTS=1 \ +py.test -vv tests/models/brand_new_bert/test_modeling_tf_brand_new_bert.py +``` + +The most likely outcome is that you'll see a bunch of errors. Don't worry, this is expected! Debugging ML models is +notoriously hard, and the key ingredient to success is patience (and `breakpoint()`). In our experience, the hardest +problems arise from subtle mismatches between ML frameworks, for which we have a few pointers at the end of this guide. +In other cases, a general test might not be directly applicable to your model, in which case we suggest an override +at the model test class level. Regardless of the issue, don't hesitate to ask for help in your draft pull request if +you're stuck. + +When all tests pass, congratulations, your model is nearly ready to be added to the 🤗 Transformers library! 🎉 + +### 6.-7. Ensure everyone can use your model + +**6. Submit the pull request** + +Once you're done with the implementation and the tests, it's time to submit a pull request. Before pushing your code, +run our code formatting utility, `make fixup` 🪄. This will automatically fix any formatting issues, which would cause +our automatic checks to fail. + +It's now time to convert your draft pull request into a real pull request. To do so, click on the "Ready for +review" button and add Joao (`@gante`) and Matt (`@Rocketknight1`) as reviewers. A model pull request will need +at least 3 reviewers, but they will take care of finding appropriate additional reviewers for your model. + +After all reviewers are happy with the state of your PR, the final action point is to remove the `from_pt=True` flag in +`.from_pretrained()` calls. Since there are no TensorFlow weights, you will have to add them! Check the section +below for instructions on how to do it. + +Finally, when the TensorFlow weights get merged, you have at least 3 reviewer approvals, and all CI checks are +green, double-check the tests locally one last time + +```bash +NVIDIA_TF32_OVERRIDE=0 RUN_SLOW=1 RUN_PT_TF_CROSS_TESTS=1 \ +py.test -vv tests/models/brand_new_bert/test_modeling_tf_brand_new_bert.py +``` + +and we will merge your PR! Congratulations on the milestone 🎉 + +**7. (Optional) Build demos and share with the world** + +One of the hardest parts about open-source is discovery. How can the other users learn about the existence of your +fabulous TensorFlow contribution? With proper communication, of course! 📣 + +There are two main ways to share your model with the community: +- Build demos. These include Gradio demos, notebooks, and other fun ways to show off your model. We highly + encourage you to add a notebook to our [community-driven demos](https://huggingface.co/docs/transformers/community). +- Share stories on social media like Twitter and LinkedIn. You should be proud of your work and share + your achievement with the community - your model can now be used by thousands of engineers and researchers around + the world 🌍! We will be happy to retweet your posts and help you share your work with the community. + + +## Adding TensorFlow weights to 🤗 Hub + +Assuming that the TensorFlow model architecture is available in 🤗 Transformers, converting PyTorch weights into +TensorFlow weights is a breeze! + +Here's how to do it: +1. Make sure you are logged into your Hugging Face account in your terminal. You can log in using the command + `huggingface-cli login` (you can find your access tokens [here](https://huggingface.co/settings/tokens)) +2. Run `transformers-cli pt-to-tf --model-name foo/bar`, where `foo/bar` is the name of the model repository + containing the PyTorch weights you want to convert +3. Tag `@joaogante` and `@Rocketknight1` in the 🤗 Hub PR the command above has just created + +That's it! 🎉 + + +## Debugging mismatches across ML frameworks 🐛 + +At some point, when adding a new architecture or when creating TensorFlow weights for an existing architecture, you +might come across errors compaining about mismatches between PyTorch and TensorFlow. You might even decide to open the +model architecture code for the two frameworks, and find that they look identical. What's going on? 🤔 + +First of all, let's talk about why understanding these mismatches matters. Many community members will use 🤗 +Transformers models out of the box, and trust that our models behave as expected. When there is a large mismatch +between the two frameworks, it implies that the model is not following the reference implementation for at least one +of the frameworks. This might lead to silent failures, in which the model runs but has poor performance. This is +arguably worse than a model that fails to run at all! To that end, we aim at having a framework mismatch smaller than +`1e-5` at all stages of the model. + +As in other numerical problems, the devil is in the details. And as in any detail-oriented craft, the secret +ingredient here is patience. Here is our suggested workflow for when you come across this type of issues: +1. Locate the source of mismatches. The model you're converting probably has near identical inner variables up to a + certain point. Place `breakpoint()` statements in the two frameworks' architectures, and compare the values of the + numerical variables in a top-down fashion until you find the source of the problems. +2. Now that you've pinpointed the source of the issue, get in touch with the 🤗 Transformers team. It is possible + that we've seen a similar problem before and can promptly provide a solution. As a fallback, scan popular pages + like StackOverflow and GitHub issues. +3. If there is no solution in sight, it means you'll have to go deeper. The good news is that you've located the + issue, so you can focus on the problematic instruction, abstracting away the rest of the model! The bad news is + that you'll have to venture into the source implementation of said instruction. In some cases, you might find an + issue with a reference implementation - don't abstain from opening an issue in the upstream repository. + +In some cases, in dicussion with the 🤗 Transformers team, we might find that the fixing the mismatch is infeasible. +When the mismatch is very small in the output layers of the model (but potentially large in the hidden states), we +might decide to ignore it in favor of distributing the model. The `pt-to-tf` CLI mentioned above has a `--max-error` +flag to override the error message at weight conversion time. diff --git a/docs/source/en/converting_tensorflow_models.mdx b/docs/source/en/converting_tensorflow_models.mdx index c11e4e62b8086e..8dc51dd61670d8 100644 --- a/docs/source/en/converting_tensorflow_models.mdx +++ b/docs/source/en/converting_tensorflow_models.mdx @@ -10,7 +10,7 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o specific language governing permissions and limitations under the License. --> -# Converting Tensorflow Checkpoints +# Converting From Tensorflow Checkpoints A command-line interface is provided to convert original Bert/GPT/GPT-2/Transformer-XL/XLNet/XLM checkpoints to models that can be loaded using the `from_pretrained` methods of the library. From 5cd16f01db3b5499d4665e8624801ed30ba87bdd Mon Sep 17 00:00:00 2001 From: Kashif Rasul Date: Fri, 30 Sep 2022 21:32:59 +0200 Subject: [PATCH 435/539] time series forecasting model (#17965) * initial files * initial model via cli * typos * make a start on the model config * ready with configuation * remove tokenizer ref. * init the transformer * added initial model forward to return dec_output * require gluonts * update dep. ver table and add as extra * fixed typo * add type for prediction_length * use num_time_features * use config * more config * typos * opps another typo * freq can be none * default via transformation is 1 * initial transformations * fix imports * added transform_start_field * add helper to create pytorch dataloader * added inital val and test data loader * added initial distr head and loss * training working * remove TimeSeriesTransformerTokenizer Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/__init__.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/time_series_transformer/__init__.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * fixed copyright * removed docs * remove time series tokenizer * fixed docs * fix text * fix second * fix default * fix order * use config directly * undo change * fix comment * fix year * fix import * add additional arguments for training vs. test * initial greedy inference loop * fix inference * comment out token inputs to enc dec * Use HF encoder/decoder * fix inference * Use Seq2SeqTSModelOutput output * return Seq2SeqTSPredictionOutput * added default arguments * fix return_dict true * scale is a tensor * output static_features for inference * clean up some unused bits * fixed typo * set return_dict if none * call model once for both train/predict * use cache if future_target is none * initial generate func * generate arguments * future_time_feat is required * return SampleTSPredictionOutput * removed unneeded classes * fix when params is none * fix return dict * fix num_attention_heads * fix arguments * remove unused shift_tokens_right * add different dropout configs * implement FeatureEmbedder, Scaler and weighted_average * remove gluonts dependency * fix class names * avoid _variable names * remove gluonts dependency * fix imports * remove gluonts from configuration * fix docs * fixed typo * move utils to examples * add example requirements * config has no freq * initial run_ts_no_trainer * remove from ignore * fix output_attentions and removed unsued getters/setters * removed unsed tests * add dec seq len * add test_attention_outputs * set has_text_modality=False * add config attribute_map * make style * make fix-copies * add encoder_outputs to TimeSeriesTransformerForPrediction forward * Improve docs, add model to README * added test_forward_signature * More improvements * Add more copied from * Fix README * Fix remaining quality issues * updated encoder and decoder * fix generate * output_hidden_states and use_cache are optional * past key_values returned too * initialize weights of distribution_output module * fixed more tests * update test_forward_signature * fix return_dict outputs * Update src/transformers/models/time_series_transformer/configuration_time_series_transformer.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/time_series_transformer/configuration_time_series_transformer.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/time_series_transformer/configuration_time_series_transformer.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/time_series_transformer/configuration_time_series_transformer.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/time_series_transformer/modeling_time_series_transformer.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/time_series_transformer/modeling_time_series_transformer.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/time_series_transformer/modeling_time_series_transformer.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * removed commented out tests * added neg. bin and normal output * Update src/transformers/models/time_series_transformer/configuration_time_series_transformer.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * move to one line * Add docstrings * Update src/transformers/models/time_series_transformer/configuration_time_series_transformer.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * add try except for assert and raise * try and raise exception * fix the documentation formatting * fix assert call * fix docstring formatting * removed input_ids from DOCSTRING * Update input docstring * Improve variable names * Update order of inputs * Improve configuration * Improve variable names * Improve docs * Remove key_length from tests * Add extra docs * initial unittests * added test_inference_no_head test * added test_inference_head * add test_seq_to_seq_generation * make style * one line * assert mean prediction * removed comments * Update src/transformers/models/time_series_transformer/modeling_time_series_transformer.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/time_series_transformer/modeling_time_series_transformer.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * fix order of args * make past_observed_mask optional as well * added Amazon license header * updated utils with new fieldnames * make style * cleanup * undo position of past_observed_mask * fix import * typo * more typo * rename example files * remove example for now * Update docs/source/en/_toctree.yml Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/time_series_transformer/configuration_time_series_transformer.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/time_series_transformer/modeling_time_series_transformer.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/time_series_transformer/modeling_time_series_transformer.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update modeling_time_series_transformer.py fix style * fixed typo * fix typo and grammer * fix style Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Co-authored-by: NielsRogge Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> --- README.md | 1 + README_ko.md | 1 + README_zh-hans.md | 1 + README_zh-hant.md | 1 + docs/source/en/_toctree.yml | 5 + docs/source/en/index.mdx | 2 + .../en/model_doc/time_series_transformer.mdx | 73 + setup.py | 1 + src/transformers/__init__.py | 23 + src/transformers/models/__init__.py | 1 + .../models/auto/configuration_auto.py | 3 + src/transformers/models/auto/modeling_auto.py | 1 + .../time_series_transformer/__init__.py | 67 + .../configuration_time_series_transformer.py | 229 ++ .../modeling_time_series_transformer.py | 1954 +++++++++++++++++ src/transformers/utils/dummy_pt_objects.py | 24 + .../time_series_transformer/__init__.py | 0 .../test_modeling_time_series_transformer.py | 438 ++++ utils/check_repo.py | 3 + 19 files changed, 2828 insertions(+) create mode 100644 docs/source/en/model_doc/time_series_transformer.mdx create mode 100644 src/transformers/models/time_series_transformer/__init__.py create mode 100644 src/transformers/models/time_series_transformer/configuration_time_series_transformer.py create mode 100644 src/transformers/models/time_series_transformer/modeling_time_series_transformer.py create mode 100644 tests/models/time_series_transformer/__init__.py create mode 100644 tests/models/time_series_transformer/test_modeling_time_series_transformer.py diff --git a/README.md b/README.md index 1d10617dbacf7d..4eb429652ab013 100644 --- a/README.md +++ b/README.md @@ -375,6 +375,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu. 1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos. 1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (from Microsoft Research) released with the paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou. +1. **[Time Series Transformer](https://huggingface.co/docs/transformers/main/model_doc/time_series_transformer)** (from HuggingFace). 1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine 1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov. 1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei. diff --git a/README_ko.md b/README_ko.md index ca21d265f2f739..c591b50417ad9f 100644 --- a/README_ko.md +++ b/README_ko.md @@ -325,6 +325,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu. 1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos. 1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (from Microsoft Research) released with the paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou. +1. **[Time Series Transformer](https://huggingface.co/docs/transformers/main/model_doc/time_series_transformer)** (from HuggingFace). 1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine 1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov. 1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei. diff --git a/README_zh-hans.md b/README_zh-hans.md index af39a369679dcf..36b33982d0a130 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -349,6 +349,7 @@ conda install -c huggingface transformers 1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (来自 Google AI) 伴随论文 [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) 由 Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu 发布。 1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (来自 Google AI) 伴随论文 [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) 由 Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos 发布。 1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (来自 Microsoft Research) 伴随论文 [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) 由 Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou 发布。 +1. **[Time Series Transformer](https://huggingface.co/docs/transformers/main/model_doc/time_series_transformer)** (from HuggingFace). 1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine 1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (来自 Google/CMU) 伴随论文 [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) 由 Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov 发布。 1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (来自 Microsoft) 伴随论文 [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) 由 Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index 3a3122af87b7a1..eef6a3589f4efd 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -361,6 +361,7 @@ conda install -c huggingface transformers 1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released with the paper [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu. 1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos. 1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (from Microsoft Research) released with the paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou. +1. **[Time Series Transformer](https://huggingface.co/docs/transformers/main/model_doc/time_series_transformer)** (from HuggingFace). 1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine 1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov. 1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft) released with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei. diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 08f3320047ed09..3ce68ad8598a1e 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -498,6 +498,11 @@ - local: model_doc/trajectory_transformer title: Trajectory Transformer title: Reinforcement learning models + - isExpanded: false + sections: + - local: model_doc/time_series_transformer + title: Time Series Transformer + title: Time series models title: Models - sections: - local: internal/modeling_utils diff --git a/docs/source/en/index.mdx b/docs/source/en/index.mdx index 8b5defb96e725e..1b862df0b0e4c8 100644 --- a/docs/source/en/index.mdx +++ b/docs/source/en/index.mdx @@ -165,6 +165,7 @@ The documentation is organized into five sections: 1. **[T5v1.1](model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu. 1. **[TAPAS](model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos. 1. **[TAPEX](model_doc/tapex)** (from Microsoft Research) released with the paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou. +1. **[Time Series Transformer](model_doc/time_series_transformer)** (from HuggingFace). 1. **[Trajectory Transformer](model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine 1. **[Transformer-XL](model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov. 1. **[TrOCR](model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei. @@ -310,6 +311,7 @@ Flax), PyTorch, and/or TensorFlow. | Swin Transformer V2 | ❌ | ❌ | ✅ | ❌ | ❌ | | T5 | ✅ | ✅ | ✅ | ✅ | ✅ | | TAPAS | ✅ | ❌ | ✅ | ✅ | ❌ | +| Time Series Transformer | ❌ | ❌ | ✅ | ❌ | ❌ | | Trajectory Transformer | ❌ | ❌ | ✅ | ❌ | ❌ | | Transformer-XL | ✅ | ❌ | ✅ | ✅ | ❌ | | TrOCR | ❌ | ❌ | ✅ | ❌ | ❌ | diff --git a/docs/source/en/model_doc/time_series_transformer.mdx b/docs/source/en/model_doc/time_series_transformer.mdx new file mode 100644 index 00000000000000..5dedef02eaa80e --- /dev/null +++ b/docs/source/en/model_doc/time_series_transformer.mdx @@ -0,0 +1,73 @@ + + +# Time Series Transformer + + + +This is a recently introduced model so the API hasn't been tested extensively. There may be some bugs or slight +breaking changes to fix it in the future. If you see something strange, file a [Github Issue](https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title). + + + +## Overview + +The Time Series Transformer model is a vanilla encoder-decoder Transformer for time series forecasting. + +Tips: + +- Similar to other models in the library, [`TimeSeriesTransformerModel`] is the raw Transformer without any head on top, and [`TimeSeriesTransformerForPrediction`] +adds a distribution head on top of the former, which can be used for time-series forecasting. Note that this is a so-called probabilistic forecasting model, not a +point forecasting model. This means that the model learns a distribution, from which one can sample. The model doesn't directly output values. +- [`TimeSeriesTransformerForPrediction`] consists of 2 blocks: an encoder, which takes a `context_length` of time series values as input (called `past_values`), +and a decoder, which predicts a `prediction_length` of time series values into the future (called `future_values`). During training, one needs to provide +pairs of (`past_values` and `future_values`) to the model. +- In addition to the raw (`past_values` and `future_values`), one typically provides additional features to the model. These can be the following: + - `past_time_features`: temporal features which the model will add to `past_values`. These serve as "positional encodings" for the Transformer encoder. + Examples are "day of the month", "month of the year", etc. as scalar values (and then stacked together as a vector). + e.g. if a given time-series value was obtained on the 11th of August, then one could have [11, 8] as time feature vector (11 being "day of the month", 8 being "month of the year"). + - `future_time_features`: temporal features which the model will add to `future_values`. These serve as "positional encodings" for the Transformer decoder. + Examples are "day of the month", "month of the year", etc. as scalar values (and then stacked together as a vector). + e.g. if a given time-series value was obtained on the 11th of August, then one could have [11, 8] as time feature vector (11 being "day of the month", 8 being "month of the year"). + - `static_categorical_features`: categorical features which are static over time (i.e., have the same value for all `past_values` and `future_values`). + An example here is the store ID or region ID that identifies a given time-series. + Note that these features need to be known for ALL data points (also those in the future). + - `static_real_features`: real-valued features which are static over time (i.e., have the same value for all `past_values` and `future_values`). + An example here is the image representation of the product for which you have the time-series values (like the [ResNet](resnet) embedding of a "shoe" picture, + if your time-series is about the sales of shoes). + Note that these features need to be known for ALL data points (also those in the future). +- The model is trained using "teacher-forcing", similar to how a Transformer is trained for machine translation. This means that, during training, one shifts the +`future_values` one position to the right as input to the decoder, prepended by the last value of `past_values`. At each time step, the model needs to predict the +next target. So the set-up of training is similar to a GPT model for language, except that there's no notion of `decoder_start_token_id` (we just use the last value +of the context as initial input for the decoder). +- At inference time, we give the final value of the `past_values` as input to the decoder. Next, we can sample from the model to make a prediction at the next time step, +which is then fed to the decoder in order to make the next prediction (also called autoregressive generation). + + +This model was contributed by [kashif]( 1 in case of + multivarate targets. + scaling (`bool`, *optional* defaults to `True`): + Whether to scale the input targets. + lags_sequence (`list[int]`, *optional*, defaults to `[1, 2, 3, 4, 5, 6, 7]`): + The lags of the input time series as covariates often dictated by the frequency. Default is `[1, 2, 3, 4, + 5, 6, 7]`. + num_time_features (`int`, *optional*, defaults to 0): + The number of time features in the input time series. + num_dynamic_real_features (`int`, *optional*, defaults to 0): + The number of dynamic real valued features. + num_static_categorical_features (`int`, *optional*, defaults to 0): + The number of static categorical features. + num_static_real_features (`int`, *optional*, defaults to 0): + The number of static real valued features. + cardinality (`list[int]`, *optional*): + The cardinality (number of different values) for each of the static categorical features. Should be a list + of integers, having the same length as `num_static_categorical_features`. Cannot be `None` if + `num_static_categorical_features` is > 0. + embedding_dimension (`list[int]`, *optional*): + The dimension of the embedding for each of the static categorical features. Should be a list of integers, + having the same length as `num_static_categorical_features`. Cannot be `None` if + `num_static_categorical_features` is > 0. + encoder_layers (`int`, *optional*, defaults to 2): + Number of encoder layers. + decoder_layers (`int`, *optional*, defaults to 2): + Number of decoder layers. + encoder_attention_heads (`int`, *optional*, defaults to 2): + Number of attention heads for each attention layer in the Transformer encoder. + decoder_attention_heads (`int`, *optional*, defaults to 2): + Number of attention heads for each attention layer in the Transformer decoder. + encoder_ffn_dim (`int`, *optional*, defaults to 32): + Dimension of the "intermediate" (often named feed-forward) layer in encoder. + decoder_ffn_dim (`int`, *optional*, defaults to 32): + Dimension of the "intermediate" (often named feed-forward) layer in decoder. + activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and decoder. If string, `"gelu"` and + `"relu"` are supported. + dropout (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the encoder, and decoder. + encoder_layerdrop (`float`, *optional*, defaults to 0.1): + The dropout probability for the attention and fully connected layers for each encoder layer. + decoder_layerdrop (`float`, *optional*, defaults to 0.1): + The dropout probability for the attention and fully connected layers for each decoder layer. + attention_dropout (`float`, *optional*, defaults to 0.1): + The dropout probability for the attention probabilities. + activation_dropout (`float`, *optional*, defaults to 0.1): + The dropout probability used between the two layers of the feed-forward networks. + num_parallel_samples (`int`, *optional*, defaults to 100): + The number of samples to generate in parallel for each time step of inference. + init_std (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated normal weight initialization distribution. + use_cache (`bool`, *optional*, defaults to `True`): + Whether to use the past key/values attentions (if applicable to the model) to speed up decoding. + + Example: + + ```python + >>> from transformers import TimeSeriesTransformerConfig, TimeSeriesTransformerModel + + >>> # Initializing a default Time Series Transformer configuration + >>> configuration = TimeSeriesTransformerConfig() + + >>> # Randomly initializing a model from the configuration + >>> model = TimeSeriesTransformerModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + model_type = "time_series_transformer" + attribute_map = { + "hidden_size": "d_model", + "num_attention_heads": "encoder_attention_heads", + "num_hidden_layers": "encoder_layers", + } + + def __init__( + self, + input_size: int = 1, + prediction_length: Optional[int] = None, + context_length: Optional[int] = None, + distribution_output: str = "student_t", + loss: str = "nll", + lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7], + scaling: bool = True, + num_dynamic_real_features: int = 0, + num_static_categorical_features: int = 0, + num_static_real_features: int = 0, + num_time_features: int = 0, + cardinality: Optional[List[int]] = None, + embedding_dimension: Optional[List[int]] = None, + encoder_ffn_dim: int = 32, + decoder_ffn_dim: int = 32, + encoder_attention_heads: int = 2, + decoder_attention_heads: int = 2, + encoder_layers: int = 2, + decoder_layers: int = 2, + is_encoder_decoder: bool = True, + activation_function: str = "gelu", + dropout: float = 0.1, + encoder_layerdrop: float = 0.1, + decoder_layerdrop: float = 0.1, + attention_dropout: float = 0.1, + activation_dropout: float = 0.1, + num_parallel_samples: int = 100, + init_std: float = 0.02, + use_cache=True, + **kwargs + ): + # time series specific configuration + self.prediction_length = prediction_length + self.context_length = context_length or prediction_length + self.distribution_output = distribution_output + self.loss = loss + self.input_size = input_size + self.num_time_features = num_time_features + self.lags_sequence = lags_sequence + self.scaling = scaling + self.num_dynamic_real_features = num_dynamic_real_features + self.num_static_real_features = num_static_real_features + self.num_static_categorical_features = num_static_categorical_features + if cardinality and num_static_categorical_features > 0: + if len(cardinality) != num_static_categorical_features: + raise ValueError( + "The cardinality should be a list of the same length as `num_static_categorical_features`" + ) + self.cardinality = cardinality + else: + self.cardinality = [1] + if embedding_dimension and num_static_categorical_features > 0: + if len(embedding_dimension) != num_static_categorical_features: + raise ValueError( + "The embedding dimension should be a list of the same length as `num_static_categorical_features`" + ) + self.embedding_dimension = embedding_dimension + else: + self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality] + self.num_parallel_samples = num_parallel_samples + + # Transformer architecture configuration + self.d_model = input_size * len(lags_sequence) + self._number_of_features + self.encoder_attention_heads = encoder_attention_heads + self.decoder_attention_heads = decoder_attention_heads + self.encoder_ffn_dim = encoder_ffn_dim + self.decoder_ffn_dim = decoder_ffn_dim + self.encoder_layers = encoder_layers + self.decoder_layers = decoder_layers + + self.dropout = dropout + self.attention_dropout = attention_dropout + self.activation_dropout = activation_dropout + self.encoder_layerdrop = encoder_layerdrop + self.decoder_layerdrop = decoder_layerdrop + + self.activation_function = activation_function + self.init_std = init_std + + self.output_attentions = False + self.output_hidden_states = False + + self.use_cache = use_cache + + super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs) + + @property + def _number_of_features(self) -> int: + return ( + sum(self.embedding_dimension) + + self.num_dynamic_real_features + + self.num_time_features + + max(1, self.num_static_real_features) # there is at least one dummy static real feature + + 1 # the log(scale) + ) diff --git a/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py b/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py new file mode 100644 index 00000000000000..bf39ae17564d40 --- /dev/null +++ b/src/transformers/models/time_series_transformer/modeling_time_series_transformer.py @@ -0,0 +1,1954 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch Time Series Transformer model.""" + +import random +from dataclasses import dataclass +from typing import Callable, Dict, List, Optional, Tuple, Union + +import torch +from torch import nn +from torch.distributions import ( + AffineTransform, + Distribution, + NegativeBinomial, + Normal, + StudentT, + TransformedDistribution, +) + +from ...activations import ACT2FN +from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, ModelOutput +from ...modeling_utils import PreTrainedModel +from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings +from .configuration_time_series_transformer import TimeSeriesTransformerConfig + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "TimeSeriesTransformerConfig" + + +TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "huggingface/time-series-transformer-tourism-monthly", + # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer +] + + +class AffineTransformed(TransformedDistribution): + def __init__(self, base_distribution: Distribution, loc=None, scale=None): + self.scale = 1.0 if scale is None else scale + self.loc = 0.0 if loc is None else loc + + super().__init__(base_distribution, [AffineTransform(self.loc, self.scale)]) + + @property + def mean(self): + """ + Returns the mean of the distribution. + """ + return self.base_dist.mean * self.scale + self.loc + + @property + def variance(self): + """ + Returns the variance of the distribution. + """ + return self.base_dist.variance * self.scale**2 + + @property + def stddev(self): + """ + Returns the standard deviation of the distribution. + """ + return self.variance.sqrt() + + +class ParameterProjection(nn.Module): + def __init__( + self, + in_features: int, + args_dim: Dict[str, int], + domain_map: Callable[..., Tuple[torch.Tensor]], + **kwargs, + ) -> None: + super().__init__(**kwargs) + self.args_dim = args_dim + self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()]) + self.domain_map = domain_map + + def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]: + params_unbounded = [proj(x) for proj in self.proj] + + return self.domain_map(*params_unbounded) + + +class LambdaLayer(nn.Module): + def __init__(self, function): + super().__init__() + self.function = function + + def forward(self, x, *args): + return self.function(x, *args) + + +class DistributionOutput: + distr_cls: type + in_features: int + args_dim: Dict[str, int] + + def __init__(self) -> None: + pass + + def _base_distribution(self, distr_args): + return self.distr_cls(*distr_args) + + def distribution( + self, + distr_args, + loc: Optional[torch.Tensor] = None, + scale: Optional[torch.Tensor] = None, + ) -> Distribution: + distr = self._base_distribution(distr_args) + if loc is None and scale is None: + return distr + else: + return AffineTransformed(distr, loc=loc, scale=scale) + + @property + def event_shape(self) -> Tuple: + r""" + Shape of each individual event contemplated by the distributions that this object constructs. + """ + raise NotImplementedError() + + @property + def event_dim(self) -> int: + r""" + Number of event dimensions, i.e., length of the `event_shape` tuple, of the distributions that this object + constructs. + """ + return len(self.event_shape) + + @property + def value_in_support(self) -> float: + r""" + A float that will have a valid numeric value when computing the log-loss of the corresponding distribution. By + default 0.0. This value will be used when padding data series. + """ + return 0.0 + + def get_parameter_projection(self, in_features: int) -> nn.Module: + r""" + Return the parameter projection layer that maps the input to the appropriate parameters of the distribution. + """ + return ParameterProjection( + in_features=in_features, + args_dim=self.args_dim, + domain_map=LambdaLayer(self.domain_map), + ) + + def domain_map(self, *args: torch.Tensor): + r""" + Converts arguments to the right shape and domain. The domain depends on the type of distribution, while the + correct shape is obtained by reshaping the trailing axis in such a way that the returned tensors define a + distribution of the right event_shape. + """ + raise NotImplementedError() + + @classmethod + def squareplus(cls, x: torch.Tensor) -> torch.Tensor: + r""" + Helper to map inputs to the positive orthant by applying the square-plus operation. Reference: + https://twitter.com/jon_barron/status/1387167648669048833 + """ + return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0 + + +class StudentTOutput(DistributionOutput): + args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1} + distr_cls: type = StudentT + + @classmethod + def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor): + scale = cls.squareplus(scale) + df = 2.0 + cls.squareplus(df) + return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1) + + @property + def event_shape(self) -> Tuple: + return () + + +class NormalOutput(DistributionOutput): + args_dim: Dict[str, int] = {"loc": 1, "scale": 1} + distr_cls: type = Normal + + @classmethod + def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor): + scale = cls.squareplus(scale) + return loc.squeeze(-1), scale.squeeze(-1) + + @property + def event_shape(self) -> Tuple: + return () + + +class NegativeBinomialOutput(DistributionOutput): + args_dim: Dict[str, int] = {"total_count": 1, "logits": 1} + distr_cls: type = NegativeBinomial + + @classmethod + def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor): + total_count = cls.squareplus(total_count) + return total_count.squeeze(-1), logits.squeeze(-1) + + def _base_distribution(self, distr_args) -> Distribution: + total_count, logits = distr_args + return self.distr_cls(total_count=total_count, logits=logits) + + # Overwrites the parent class method. We cannot scale using the affine + # transformation since negative binomial should return integers. Instead + # we scale the parameters. + def distribution( + self, + distr_args, + loc: Optional[torch.Tensor] = None, + scale: Optional[torch.Tensor] = None, + ) -> Distribution: + total_count, logits = distr_args + + if scale is not None: + logits += scale.log() + + return NegativeBinomial(total_count=total_count, logits=logits) + + @property + def event_shape(self) -> Tuple: + return () + + +class FeatureEmbedder(nn.Module): + def __init__(self, cardinalities: List[int], embedding_dims: List[int]) -> None: + super().__init__() + + self.num_features = len(cardinalities) + self.embedders = nn.ModuleList([nn.Embedding(c, d) for c, d in zip(cardinalities, embedding_dims)]) + + def forward(self, features: torch.Tensor) -> torch.Tensor: + if self.num_features > 1: + # we slice the last dimension, giving an array of length + # self.num_features with shape (N,T) or (N) + cat_feature_slices = torch.chunk(features, self.num_features, dim=-1) + else: + cat_feature_slices = [features] + + return torch.cat( + [ + embed(cat_feature_slice.squeeze(-1)) + for embed, cat_feature_slice in zip(self.embedders, cat_feature_slices) + ], + dim=-1, + ) + + +class MeanScaler(nn.Module): + """ + Computes a scaling factor as the weighted average absolute value along dimension `dim`, and scales the data + accordingly. + + Args: + dim (`int`): + Dimension along which to compute the scale. + keepdim (`bool`, *optional*, defaults to `False`): + Controls whether to retain dimension `dim` (of length 1) in the scale tensor, or suppress it. + minimum_scale (`float`, *optional*, defaults to 1e-10): + Default scale that is used for elements that are constantly zero along dimension `dim`. + """ + + def __init__(self, dim: int, keepdim: bool = False, minimum_scale: float = 1e-10): + super().__init__() + if not dim > 0: + raise ValueError("Cannot compute scale along dim = 0 (batch dimension), please provide dim > 0") + self.dim = dim + self.keepdim = keepdim + self.register_buffer("minimum_scale", torch.tensor(minimum_scale)) + + def forward(self, data: torch.Tensor, weights: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + # these will have shape (N, C) + total_weight = weights.sum(dim=self.dim) + weighted_sum = (data.abs() * weights).sum(dim=self.dim) + + # first compute a global scale per-dimension + total_observed = total_weight.sum(dim=0) + denominator = torch.max(total_observed, torch.ones_like(total_observed)) + default_scale = weighted_sum.sum(dim=0) / denominator + + # then compute a per-item, per-dimension scale + denominator = torch.max(total_weight, torch.ones_like(total_weight)) + scale = weighted_sum / denominator + + # use per-batch scale when no element is observed + # or when the sequence contains only zeros + scale = ( + torch.max( + self.minimum_scale, + torch.where( + weighted_sum > torch.zeros_like(weighted_sum), + scale, + default_scale * torch.ones_like(total_weight), + ), + ) + .detach() + .unsqueeze(dim=self.dim) + ) + + return data / scale, scale if self.keepdim else scale.squeeze(dim=self.dim) + + +class NOPScaler(nn.Module): + """ + Assigns a scaling factor equal to 1 along dimension `dim`, and therefore applies no scaling to the input data. + + Args: + dim (`int`): + Dimension along which to compute the scale. + keepdim (`bool`, *optional*, defaults to `False`): + Controls whether to retain dimension `dim` (of length 1) in the scale tensor, or suppress it. + """ + + def __init__(self, dim: int, keepdim: bool = False): + super().__init__() + self.dim = dim + self.keepdim = keepdim + + def forward(self, data: torch.Tensor, observed_indicator: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + scale = torch.ones_like(data).mean(dim=self.dim, keepdim=self.keepdim) + return data, scale + + +def weighted_average(input_tensor: torch.Tensor, weights: Optional[torch.Tensor] = None, dim=None) -> torch.Tensor: + """ + Computes the weighted average of a given tensor across a given `dim`, masking values associated with weight zero, + meaning instead of `nan * 0 = nan` you will get `0 * 0 = 0`. + + Args: + input_tensor (`torch.FloatTensor`): + Input tensor, of which the average must be computed. + weights (`torch.FloatTensor`, *optional*): + Weights tensor, of the same shape as `input_tensor`. + dim (`int`, *optional*): + The dim along which to average `input_tensor`. + + Returns: + `torch.FloatTensor`: The tensor with values averaged along the specified `dim`. + """ + if weights is not None: + weighted_tensor = torch.where(weights != 0, input_tensor * weights, torch.zeros_like(input_tensor)) + sum_weights = torch.clamp(weights.sum(dim=dim) if dim else weights.sum(), min=1.0) + return (weighted_tensor.sum(dim=dim) if dim else weighted_tensor.sum()) / sum_weights + else: + return input_tensor.mean(dim=dim) + + +class NegativeLogLikelihood: + """ + Computes the negative log likelihood loss. + + Args: + beta (`float`): + Float in range (0, 1). The beta parameter from the paper: "On the Pitfalls of Heteroscedastic Uncertainty + Estimation with Probabilistic Neural Networks" by [Seitzer et al. + 2022](https://openreview.net/forum?id=aPOpXlnV1T). + """ + + beta: float = 0.0 + + def __call__(self, input: torch.distributions.Distribution, target: torch.Tensor) -> torch.Tensor: + nll = -input.log_prob(target) + if self.beta > 0.0: + variance = input.variance + nll = nll * (variance.detach() ** self.beta) + return nll + + +# Copied from transformers.models.bart.modeling_bart._make_causal_mask +def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0): + """ + Make causal mask used for bi-directional self-attention. + """ + bsz, tgt_len = input_ids_shape + mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min)) + mask_cond = torch.arange(mask.size(-1)) + mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) + mask = mask.to(dtype) + + if past_key_values_length > 0: + mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1) + return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) + + +# Copied from transformers.models.bart.modeling_bart._expand_mask +def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. + """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + + +@dataclass +class Seq2SeqTimeSeriesModelOutput(ModelOutput): + """ + Base class for model encoder's outputs that also contains pre-computed hidden states that can speed up sequential + decoding. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the decoder of the model. + + If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, + hidden_size)` is output. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + scale: (`torch.FloatTensor` of shape `(batch_size,)`, *optional*): + Scaling values of each time series' context window which is used to give the model inputs of the same + magnitude and then used to rescale to the original scale. + static_features: (`torch.FloatTensor` of shape `(batch_size, feature size)`, *optional*): + Static features of each time series' in a batch which are copied to the covariates at inference time. + """ + + last_hidden_state: torch.FloatTensor = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + scale: Optional[torch.FloatTensor] = None + static_features: Optional[torch.FloatTensor] = None + + +@dataclass +class Seq2SeqTimeSeriesPredictionOutput(ModelOutput): + """ + Base class for model's predictions outputs that also contain the loss as well parameters of the chosen + distribution. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when a `future_values` is provided): + Distributional loss. + params (`torch.FloatTensor` of shape `(batch_size, num_samples, num_params)`): + Parameters of the chosen distribution. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the + weighted average in the cross-attention heads. + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + scale: (`torch.FloatTensor` of shape `(batch_size,)`, *optional*): + Scaling values of each time series' context window which is used to give the model inputs of the same + magnitude and then used to rescale to the original scale. + static_features: (`torch.FloatTensor` of shape `(batch_size, feature size)`, *optional*): + Static features of each time series' in a batch which are copied to the covariates at inference time. + """ + + loss: Optional[torch.FloatTensor] = None + params: Optional[Tuple[torch.FloatTensor]] = None + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + scale: Optional[torch.FloatTensor] = None + static_features: Optional[torch.FloatTensor] = None + + +@dataclass +class SampleTimeSeriesPredictionOutput(ModelOutput): + sequences: torch.FloatTensor = None + + +# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->TimeSeriesTransformer +class TimeSeriesTransformerAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + is_decoder: bool = False, + bias: bool = True, + ): + super().__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + + if (self.head_dim * num_heads) != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" + f" and `num_heads`: {num_heads})." + ) + self.scaling = self.head_dim**-0.5 + self.is_decoder = is_decoder + + self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + key_value_states: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + + bsz, tgt_len, _ = hidden_states.size() + + # get query proj + query_states = self.q_proj(hidden_states) * self.scaling + # get key, value proj + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_states = past_key_value[0] + value_states = past_key_value[1] + elif is_cross_attention: + # cross_attentions + key_states = self._shape(self.k_proj(key_value_states), -1, bsz) + value_states = self._shape(self.v_proj(key_value_states), -1, bsz) + elif past_key_value is not None: + # reuse k, v, self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + else: + # self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states, value_states) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) + key_states = key_states.view(*proj_shape) + value_states = value_states.view(*proj_shape) + + src_len = key_states.size(1) + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if layer_head_mask is not None: + if layer_head_mask.size() != (self.num_heads,): + raise ValueError( + f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" + f" {layer_head_mask.size()}" + ) + attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + if output_attentions: + # this operation is a bit awkward, but it's required to + # make sure that attn_weights keeps its gradient. + # In order to do so, attn_weights have to be reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + + # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be + # partitioned aross GPUs when using tensor-parallelism. + attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped, past_key_value + + +# Copied from transformers.models.bart.modeling_bart.BartEncoderLayer with Bart->TimeSeriesTransformer +class TimeSeriesTransformerEncoderLayer(nn.Module): + def __init__(self, config: TimeSeriesTransformerConfig): + super().__init__() + self.embed_dim = config.d_model + self.self_attn = TimeSeriesTransformerAttention( + embed_dim=self.embed_dim, + num_heads=config.encoder_attention_heads, + dropout=config.attention_dropout, + ) + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) + self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.FloatTensor, + attention_mask: torch.FloatTensor, + layer_head_mask: torch.FloatTensor, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` + attention_mask (`torch.FloatTensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size + `(encoder_attention_heads,)`. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + """ + residual = hidden_states + hidden_states, attn_weights, _ = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + if hidden_states.dtype == torch.float16 and ( + torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() + ): + clamp_value = torch.finfo(hidden_states.dtype).max - 1000 + hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +# Copied from transformers.models.bart.modeling_bart.BartDecoderLayer with Bart->TimeSeriesTransformer +class TimeSeriesTransformerDecoderLayer(nn.Module): + def __init__(self, config: TimeSeriesTransformerConfig): + super().__init__() + self.embed_dim = config.d_model + + self.self_attn = TimeSeriesTransformerAttention( + embed_dim=self.embed_dim, + num_heads=config.decoder_attention_heads, + dropout=config.attention_dropout, + is_decoder=True, + ) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.encoder_attn = TimeSeriesTransformerAttention( + self.embed_dim, + config.decoder_attention_heads, + dropout=config.attention_dropout, + is_decoder=True, + ) + self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) + self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + cross_attn_layer_head_mask: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = True, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + encoder_hidden_states (`torch.FloatTensor`): + cross attention input to the layer of shape `(batch, seq_len, embed_dim)` + encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size + `(encoder_attention_heads,)`. + cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of + size `(decoder_attention_heads,)`. + past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + """ + residual = hidden_states + + # Self Attention + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + # add present self-attn cache to positions 1,2 of present_key_value tuple + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + past_key_value=self_attn_past_key_value, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + # Cross-Attention Block + cross_attn_present_key_value = None + cross_attn_weights = None + if encoder_hidden_states is not None: + residual = hidden_states + + # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( + hidden_states=hidden_states, + key_value_states=encoder_hidden_states, + attention_mask=encoder_attention_mask, + layer_head_mask=cross_attn_layer_head_mask, + past_key_value=cross_attn_past_key_value, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.encoder_attn_layer_norm(hidden_states) + + # add cross-attn to positions 3,4 of present_key_value tuple + present_key_value = present_key_value + cross_attn_present_key_value + + # Fully Connected + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights, cross_attn_weights) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +class TimeSeriesTransformerPreTrainedModel(PreTrainedModel): + config_class = TimeSeriesTransformerConfig + base_model_prefix = "model" + main_input_name = "past_values" + supports_gradient_checkpointing = True + + def _init_weights(self, module): + std = self.config.init_std + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, (TimeSeriesTransformerDecoder, TimeSeriesTransformerEncoder)): + module.gradient_checkpointing = value + + +TIME_SERIES_TRANSFORMER_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`TimeSeriesTransformerConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +TIME_SERIES_TRANSFORMER_INPUTS_DOCSTRING = r""" + Args: + past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): + Past values of the time series, that serve as context in order to predict the future. These values may + contain lags, i.e. additional values from the past which are added in order to serve as "extra context". + The `past_values` is what the Transformer encoder gets as input (with optional additional features, such as + `static_categorical_features`, `static_real_features`, `past_time_features`). + + The sequence length here is equal to `context_length` + `max(config.lags_sequence)`. + + Missing values need to be replaced with zeros. + + past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`, *optional*): + Optional time features, which the model internally will add to `past_values`. These could be things like + "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These + could also be so-called "age" features, which basically help the model know "at which point in life" a + time-series is. Age features have small values for distant past time steps and increase monotonically the + more we approach the current time step. + + These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where + the position encodings are learned from scratch internally as parameters of the model, the Time Series + Transformer requires to provide additional time features. + + The Time Series Transformer only learns additional embeddings for `static_categorical_features`. + + past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*): + Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected in + `[0, 1]`: + + - 1 for values that are **observed**, + - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). + + static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*): + Optional static categorical features for which the model will learn an embedding, which it will add to the + values of the time series. + + Static categorical features are features which have the same value for all time steps (static over time). + + A typical example of a static categorical feature is a time series ID. + + static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*): + Optional static real features which the model will add to the values of the time series. + + Static real features are features which have the same value for all time steps (static over time). + + A typical example of a static real feature is promotion information. + + future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)`): + Future values of the time series, that serve as labels for the model. The `future_values` is what the + Transformer needs to learn to output, given the `past_values`. + + See the demo notebook and code snippets for details. + + Missing values need to be replaced with zeros. + + future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`, *optional*): + Optional time features, which the model internally will add to `future_values`. These could be things like + "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These + could also be so-called "age" features, which basically help the model know "at which point in life" a + time-series is. Age features have small values for distant past time steps and increase monotonically the + more we approach the current time step. + + These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT, where + the position encodings are learned from scratch internally as parameters of the model, the Time Series + Transformer requires to provide additional features. + + The Time Series Transformer only learns additional embeddings for `static_categorical_features`. + + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on certain token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Mask to avoid performing attention on certain token indices. By default, a causal mask will be used, to + make sure the model can only look at previous inputs in order to predict the future. + + head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): + Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*) + `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of + hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +class TimeSeriesTransformerEncoder(TimeSeriesTransformerPreTrainedModel): + """ + Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a + [`TimeSeriesTransformerEncoderLayer`]. + + Args: + config: TimeSeriesTransformerConfig + """ + + def __init__(self, config: TimeSeriesTransformerConfig): + super().__init__(config) + + self.dropout = config.dropout + self.layerdrop = config.encoder_layerdrop + + embed_dim = config.d_model + + self.layers = nn.ModuleList([TimeSeriesTransformerEncoderLayer(config) for _ in range(config.encoder_layers)]) + self.layernorm_embedding = nn.LayerNorm(embed_dim) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def forward( + self, + attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutput]: + r""" + Args: + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + hidden_states = inputs_embeds + hidden_states = self.layernorm_embedding(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + # expand attention_mask + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype) + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + # check if head_mask has a correct number of layers specified if desired + if head_mask is not None: + if head_mask.size()[0] != (len(self.layers)): + raise ValueError( + f"The head_mask should be specified for {len(self.layers)} layers, but it is for" + f" {head_mask.size()[0]}." + ) + + for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + dropout_probability = random.uniform(0, 1) + if self.training and (dropout_probability < self.layerdrop): # skip the layer + layer_outputs = (None, None) + else: + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(encoder_layer), + hidden_states, + attention_mask, + (head_mask[idx] if head_mask is not None else None), + ) + else: + layer_outputs = encoder_layer( + hidden_states, + attention_mask, + layer_head_mask=(head_mask[idx] if head_mask is not None else None), + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions + ) + + +class TimeSeriesTransformerDecoder(TimeSeriesTransformerPreTrainedModel): + """ + Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a + [`TimeSeriesTransformerDecoderLayer`] + + Args: + config: TimeSeriesTransformerConfig + """ + + def __init__(self, config: TimeSeriesTransformerConfig): + super().__init__(config) + self.dropout = config.dropout + self.layerdrop = config.decoder_layerdrop + + self.layers = nn.ModuleList([TimeSeriesTransformerDecoderLayer(config) for _ in range(config.decoder_layers)]) + self.layernorm_embedding = nn.LayerNorm(config.d_model) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + combined_attention_mask = None + if input_shape[-1] > 1: + combined_attention_mask = _make_causal_mask( + input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length + ).to(inputs_embeds.device) + + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( + inputs_embeds.device + ) + combined_attention_mask = ( + expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask + ) + + return combined_attention_mask + + def forward( + self, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: + r""" + Args: + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention + of the decoder. + encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): + Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values + selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing + cross-attention on hidden heads. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of + shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the + cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those + that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of + all `decoder_input_ids` of shape `(batch_size, sequence_length)`. + + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + input_shape = inputs_embeds.size()[:-1] + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + attention_mask = self._prepare_decoder_attention_mask( + attention_mask, input_shape, inputs_embeds, past_key_values_length + ) + + # expand encoder attention mask + if encoder_hidden_states is not None and encoder_attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) + + hidden_states = inputs_embeds + hidden_states = self.layernorm_embedding(hidden_states) + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None + next_decoder_cache = () if use_cache else None + + # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired + for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): + if attn_mask is not None: + if attn_mask.size()[0] != (len(self.layers)): + raise ValueError( + f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" + f" {head_mask.size()[0]}." + ) + + for idx, decoder_layer in enumerate(self.layers): + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + if output_hidden_states: + all_hidden_states += (hidden_states,) + dropout_probability = random.uniform(0, 1) + if self.training and (dropout_probability < self.layerdrop): + continue + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + if use_cache: + logger.warning( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + def create_custom_forward(module): + def custom_forward(*inputs): + # None for past_key_value + return module(*inputs, output_attentions, use_cache) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(decoder_layer), + hidden_states, + attention_mask, + encoder_hidden_states, + encoder_attention_mask, + head_mask[idx] if head_mask is not None else None, + cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, + None, + ) + else: + + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + layer_head_mask=(head_mask[idx] if head_mask is not None else None), + cross_attn_layer_head_mask=( + cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None + ), + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + if encoder_hidden_states is not None: + all_cross_attentions += (layer_outputs[2],) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + if not return_dict: + return tuple( + v + for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + cross_attentions=all_cross_attentions, + ) + + +@add_start_docstrings( + "The bare Time Series Transformer Model outputting raw hidden-states without any specific head on top.", + TIME_SERIES_TRANSFORMER_START_DOCSTRING, +) +class TimeSeriesTransformerModel(TimeSeriesTransformerPreTrainedModel): + def __init__(self, config: TimeSeriesTransformerConfig): + super().__init__(config) + + if config.scaling: + self.scaler = MeanScaler(dim=1, keepdim=True) + else: + self.scaler = NOPScaler(dim=1, keepdim=True) + + self.embedder = FeatureEmbedder( + cardinalities=config.cardinality, + embedding_dims=config.embedding_dimension, + ) + + # transformer encoder-decoder and mask initializer + self.encoder = TimeSeriesTransformerEncoder(config) + self.decoder = TimeSeriesTransformerDecoder(config) + + # Initialize weights and apply final processing + self.post_init() + + @property + def _past_length(self) -> int: + return self.config.context_length + max(self.config.lags_sequence) + + def get_lagged_subsequences( + self, sequence: torch.Tensor, subsequences_length: int, shift: int = 0 + ) -> torch.Tensor: + """ + Returns lagged subsequences of a given sequence. Returns a tensor of shape (N, S, C, I), + where S = subsequences_length and I = len(indices), containing lagged subsequences. Specifically, lagged[i, + j, :, k] = sequence[i, -indices[k]-S+j, :]. + + Args: + sequence: Tensor + The sequence from which lagged subsequences should be extracted. Shape: (N, T, C). + subsequences_length : int + Length of the subsequences to be extracted. + shift: int + Shift the lags by this amount back. + """ + sequence_length = sequence.shape[1] + indices = [lag - shift for lag in self.config.lags_sequence] + + try: + assert max(indices) + subsequences_length <= sequence_length, ( + f"lags cannot go further than history length, found lag {max(indices)} " + f"while history length is only {sequence_length}" + ) + except AssertionError as e: + e.args += (max(indices), sequence_length) + raise + + lagged_values = [] + for lag_index in indices: + begin_index = -lag_index - subsequences_length + end_index = -lag_index if lag_index > 0 else None + lagged_values.append(sequence[:, begin_index:end_index, ...]) + return torch.stack(lagged_values, dim=-1) + + def create_network_inputs( + self, + past_values: torch.Tensor, + past_time_features: torch.Tensor, + static_categorical_features: torch.Tensor, + static_real_features: torch.Tensor, + past_observed_mask: Optional[torch.Tensor] = None, + future_values: Optional[torch.Tensor] = None, + future_time_features: Optional[torch.Tensor] = None, + ): + # time feature + time_feat = ( + torch.cat( + ( + past_time_features[:, self._past_length - self.config.context_length :, ...], + future_time_features, + ), + dim=1, + ) + if future_values is not None + else past_time_features[:, self._past_length - self.config.context_length :, ...] + ) + + # target + if past_observed_mask is None: + past_observed_mask = torch.ones_like(past_values) + + context = past_values[:, -self.config.context_length :] + observed_context = past_observed_mask[:, -self.config.context_length :] + _, scale = self.scaler(context, observed_context) + + inputs = ( + torch.cat((past_values, future_values), dim=1) / scale + if future_values is not None + else past_values / scale + ) + + inputs_length = ( + self._past_length + self.config.prediction_length if future_values is not None else self._past_length + ) + try: + assert inputs.shape[1] == inputs_length, ( + f"input length {inputs.shape[1]} and dynamic feature lengths {inputs_length} does not match", + ) + except AssertionError as e: + e.args += (inputs.shape[1], inputs_length) + raise + + subsequences_length = ( + self.config.context_length + self.config.prediction_length + if future_values is not None + else self.config.context_length + ) + + # embeddings + embedded_cat = self.embedder(static_categorical_features) + static_feat = torch.cat( + (embedded_cat, static_real_features, scale.log()), + dim=1, + ) + expanded_static_feat = static_feat.unsqueeze(1).expand(-1, time_feat.shape[1], -1) + + features = torch.cat((expanded_static_feat, time_feat), dim=-1) + + # sequence = torch.cat((prior_input, inputs), dim=1) + lagged_sequence = self.get_lagged_subsequences(sequence=inputs, subsequences_length=subsequences_length) + + lags_shape = lagged_sequence.shape + reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1) + + transformer_inputs = torch.cat((reshaped_lagged_sequence, features), dim=-1) + + return transformer_inputs, scale, static_feat + + def enc_dec_outputs(self, transformer_inputs): + enc_input = transformer_inputs[:, : self.config.context_length, ...] + dec_input = transformer_inputs[:, self.config.context_length :, ...] + + encoder_outputs = self.encoder(inputs_embeds=enc_input) + decoder_outputs = self.decoder( + inputs_embeds=dec_input, encoder_hidden_states=encoder_outputs.last_hidden_state + ) + return encoder_outputs, decoder_outputs + + def get_encoder(self): + return self.encoder + + def get_decoder(self): + return self.decoder + + @add_start_docstrings_to_model_forward(TIME_SERIES_TRANSFORMER_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=Seq2SeqTimeSeriesModelOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + past_values: torch.Tensor, + past_time_features: torch.Tensor, + past_observed_mask: torch.Tensor, + static_categorical_features: torch.Tensor, + static_real_features: torch.Tensor, + future_values: Optional[torch.Tensor] = None, + future_time_features: Optional[torch.Tensor] = None, + decoder_attention_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.Tensor] = None, + decoder_head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[List[torch.FloatTensor]] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + output_hidden_states: Optional[bool] = None, + output_attentions: Optional[bool] = None, + use_cache: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + r""" + Returns: + + Examples: + + ```python + >>> from transformers import TimeSeriesTransformerModel + >>> import torch + + >>> model = TimeSeriesTransformerModel.from_pretrained("huggingface/tst-base") + + >>> inputs = dict() + >>> batch_size = 2 + >>> cardinality = 5 + >>> num_time_features = 10 + >>> content_length = 8 + >>> prediction_length = 2 + >>> lags_sequence = [2, 3] + >>> past_length = context_length + max(lags_sequence) + + >>> # encoder inputs + >>> inputs["static_categorical_features"] = ids_tensor([batch_size, 1], cardinality) + >>> inputs["static_real_features"] = torch.randn([batch_size, 1]) + >>> inputs["past_time_features"] = torch.randn([batch_size, past_length, num_time_features]) + >>> inputs["past_values"] = torch.randn([batch_size, past_length]) + >>> inputs["past_observed_mask"] = torch.ones([batch_size, past_length]) + + >>> # decoder inputs + >>> inputs["future_time_features"] = torch.randn([batch_size, prediction_length, num_time_features]) + >>> inputs["future_values"] = torch.randn([batch_size, prediction_length]) + + >>> outputs = model(**inputs) + >>> last_hidden_states = outputs.last_hidden_state + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_inputs, scale, static_feat = self.create_network_inputs( + past_values=past_values, + past_time_features=past_time_features, + past_observed_mask=past_observed_mask, + static_categorical_features=static_categorical_features, + static_real_features=static_real_features, + future_values=future_values, + future_time_features=future_time_features, + ) + + if encoder_outputs is None: + enc_input = transformer_inputs[:, : self.config.context_length, ...] + encoder_outputs = self.encoder( + inputs_embeds=enc_input, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True + elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): + encoder_outputs = BaseModelOutput( + last_hidden_state=encoder_outputs[0], + hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, + attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, + ) + + dec_input = transformer_inputs[:, self.config.context_length :, ...] + decoder_outputs = self.decoder( + inputs_embeds=dec_input, + attention_mask=decoder_attention_mask, + encoder_hidden_states=encoder_outputs[0], + head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + if not return_dict: + return decoder_outputs + encoder_outputs + (scale, static_feat) + + return Seq2SeqTimeSeriesModelOutput( + last_hidden_state=decoder_outputs.last_hidden_state, + past_key_values=decoder_outputs.past_key_values, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + scale=scale, + static_features=static_feat, + ) + + +@add_start_docstrings( + "The Time Series Transformer Model with a distribution head on top for time-series forecasting.", + TIME_SERIES_TRANSFORMER_START_DOCSTRING, +) +class TimeSeriesTransformerForPrediction(TimeSeriesTransformerPreTrainedModel): + def __init__(self, config: TimeSeriesTransformerConfig): + super().__init__(config) + self.model = TimeSeriesTransformerModel(config) + if config.distribution_output == "student_t": + self.distribution_output = StudentTOutput() + elif config.distribution_output == "normal": + self.distribution_output = NormalOutput() + elif config.distribution_output == "negative_binomial": + self.distribution_output = NegativeBinomialOutput() + else: + raise ValueError(f"Unknown distribution output {config.distribution_output}") + + self.parameter_projection = self.distribution_output.get_parameter_projection(self.model.config.d_model) + self.target_shape = self.distribution_output.event_shape + + if config.loss == "nll": + self.loss = NegativeLogLikelihood() + else: + raise ValueError(f"Unknown loss function {config.loss}") + + # Initialize weights of distribution_output and apply final processing + self.post_init() + + def output_params(self, dec_output): + return self.parameter_projection(dec_output) + + def get_encoder(self): + return self.model.get_encoder() + + def get_decoder(self): + return self.model.get_decoder() + + @torch.jit.ignore + def output_distribution(self, params, scale=None, trailing_n=None) -> torch.distributions.Distribution: + sliced_params = params + if trailing_n is not None: + sliced_params = [p[:, -trailing_n:] for p in params] + return self.distribution_output.distribution(sliced_params, scale=scale) + + @add_start_docstrings_to_model_forward(TIME_SERIES_TRANSFORMER_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=Seq2SeqTimeSeriesModelOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + past_values: torch.Tensor, + past_time_features: torch.Tensor, + past_observed_mask: torch.Tensor, + static_categorical_features: torch.Tensor, + static_real_features: torch.Tensor, + future_values: Optional[torch.Tensor] = None, + future_time_features: Optional[torch.Tensor] = None, + future_observed_mask: Optional[torch.Tensor] = None, + decoder_attention_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.Tensor] = None, + decoder_head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[List[torch.FloatTensor]] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + output_hidden_states: Optional[bool] = None, + output_attentions: Optional[bool] = None, + use_cache: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + r""" + Returns: + + future_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*): + Boolean mask to indicate which `future_values` were observed and which were missing. Mask values selected + in `[0, 1]`: + + - 1 for values that are **observed**, + - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). + + This mask is used to filter out missing values for the final loss calculation. + + Examples: + + ```python + >>> from transformers import TimeSeriesTransformerForPrediction + >>> import torch + + >>> model = TimeSeriesTransformerForPrediction.from_pretrained("huggingface/tst-base") + + >>> inputs = dict() + >>> batch_size = 2 + >>> cardinality = 5 + >>> num_time_features = 10 + >>> content_length = 8 + >>> prediction_length = 2 + >>> lags_sequence = [2, 3] + >>> past_length = context_length + max(lags_sequence) + + >>> # encoder inputs + >>> inputs["static_categorical_features"] = ids_tensor([batch_size, 1], cardinality) + >>> inputs["static_real_features"] = torch.randn([batch_size, 1]) + >>> inputs["past_time_features"] = torch.randn([batch_size, past_length, num_time_features]) + >>> inputs["past_values"] = torch.randn([batch_size, past_length]) + >>> inputs["past_observed_mask"] = torch.ones([batch_size, past_length]) + + >>> # decoder inputs + >>> inputs["future_time_features"] = torch.randn([batch_size, prediction_length, num_time_features]) + >>> inputs["future_values"] = torch.randn([batch_size, prediction_length]) + + >>> outputs = model(**inputs) + >>> loss = outputs.loss + ```""" + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + if future_values is not None: + use_cache = False + + outputs = self.model( + past_values=past_values, + past_time_features=past_time_features, + past_observed_mask=past_observed_mask, + static_categorical_features=static_categorical_features, + static_real_features=static_real_features, + future_values=future_values, + future_time_features=future_time_features, + decoder_attention_mask=decoder_attention_mask, + head_mask=head_mask, + decoder_head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + encoder_outputs=encoder_outputs, + past_key_values=past_key_values, + output_hidden_states=output_hidden_states, + output_attentions=output_attentions, + use_cache=use_cache, + return_dict=return_dict, + ) + + prediction_loss = None + params = None + if future_values is not None: + params = self.output_params(outputs[0]) # outputs.last_hidden_state + distribution = self.output_distribution(params, outputs[-2]) # outputs.scale + + loss = self.loss(distribution, future_values) + + if future_observed_mask is None: + future_observed_mask = torch.ones_like(future_values) + + if len(self.target_shape) == 0: + loss_weights = future_observed_mask + else: + loss_weights = future_observed_mask.min(dim=-1, keepdim=False) + + prediction_loss = weighted_average(loss, weights=loss_weights) + + if not return_dict: + outputs = ((params,) + outputs[1:]) if params is not None else outputs[1:] + return ((prediction_loss,) + outputs) if prediction_loss is not None else outputs + + return Seq2SeqTimeSeriesPredictionOutput( + loss=prediction_loss, + params=params, + past_key_values=outputs.past_key_values, + decoder_hidden_states=outputs.decoder_hidden_states, + decoder_attentions=outputs.decoder_attentions, + cross_attentions=outputs.cross_attentions, + encoder_last_hidden_state=outputs.encoder_last_hidden_state, + encoder_hidden_states=outputs.encoder_hidden_states, + encoder_attentions=outputs.encoder_attentions, + scale=outputs.scale, + static_features=outputs.static_features, + ) + + @torch.no_grad() + def generate( + self, + static_categorical_features: torch.Tensor, + static_real_features: torch.Tensor, + past_time_features: torch.Tensor, + past_values: torch.Tensor, + past_observed_mask: torch.Tensor, + future_time_features: Optional[torch.Tensor], + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + ) -> torch.Tensor: + outputs = self( + static_categorical_features=static_categorical_features, + static_real_features=static_real_features, + past_time_features=past_time_features, + past_values=past_values, + past_observed_mask=past_observed_mask, + future_time_features=future_time_features, + future_values=None, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=True, + use_cache=True, + ) + + decoder = self.model.get_decoder() + enc_last_hidden = outputs.encoder_last_hidden_state + scale = outputs.scale + static_feat = outputs.static_features + + num_parallel_samples = self.config.num_parallel_samples + repeated_scale = scale.repeat_interleave(repeats=num_parallel_samples, dim=0) + + repeated_past_values = past_values.repeat_interleave(repeats=num_parallel_samples, dim=0) / repeated_scale + + expanded_static_feat = static_feat.unsqueeze(1).expand(-1, future_time_features.shape[1], -1) + features = torch.cat((expanded_static_feat, future_time_features), dim=-1) + repeated_features = features.repeat_interleave(repeats=num_parallel_samples, dim=0) + + repeated_enc_last_hidden = enc_last_hidden.repeat_interleave(repeats=num_parallel_samples, dim=0) + + future_samples = [] + + # greedy decoding + for k in range(self.config.prediction_length): + lagged_sequence = self.model.get_lagged_subsequences( + sequence=repeated_past_values, + subsequences_length=1 + k, + shift=1, + ) + + lags_shape = lagged_sequence.shape + reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1) + + decoder_input = torch.cat((reshaped_lagged_sequence, repeated_features[:, : k + 1]), dim=-1) + + dec_output = decoder(inputs_embeds=decoder_input, encoder_hidden_states=repeated_enc_last_hidden) + dec_last_hidden = dec_output.last_hidden_state + + params = self.parameter_projection(dec_last_hidden[:, -1:]) + distr = self.output_distribution(params, scale=repeated_scale) + next_sample = distr.sample() + + repeated_past_values = torch.cat((repeated_past_values, next_sample / repeated_scale), dim=1) + future_samples.append(next_sample) + + concat_future_samples = torch.cat(future_samples, dim=1) + + return SampleTimeSeriesPredictionOutput( + sequences=concat_future_samples.reshape( + (-1, num_parallel_samples, self.config.prediction_length) + self.target_shape, + ) + ) diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index 769cc4c4b346b5..9f540bd2838634 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -4811,6 +4811,30 @@ def load_tf_weights_in_t5(*args, **kwargs): requires_backends(load_tf_weights_in_t5, ["torch"]) +TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TimeSeriesTransformerForPrediction(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TimeSeriesTransformerModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class TimeSeriesTransformerPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/tests/models/time_series_transformer/__init__.py b/tests/models/time_series_transformer/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/tests/models/time_series_transformer/test_modeling_time_series_transformer.py b/tests/models/time_series_transformer/test_modeling_time_series_transformer.py new file mode 100644 index 00000000000000..d513f1fe21252b --- /dev/null +++ b/tests/models/time_series_transformer/test_modeling_time_series_transformer.py @@ -0,0 +1,438 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Testing suite for the PyTorch TimeSeriesTransformer model. """ + +import inspect +import tempfile +import unittest + +from huggingface_hub import hf_hub_download +from transformers import is_torch_available +from transformers.testing_utils import require_torch, slow, torch_device + +from ...test_configuration_common import ConfigTester +from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor + + +TOLERANCE = 1e-4 + +if is_torch_available(): + import torch + + from transformers import ( + TimeSeriesTransformerConfig, + TimeSeriesTransformerForPrediction, + TimeSeriesTransformerModel, + ) + from transformers.models.time_series_transformer.modeling_time_series_transformer import ( + TimeSeriesTransformerDecoder, + TimeSeriesTransformerEncoder, + ) + + +@require_torch +class TimeSeriesTransformerModelTester: + def __init__( + self, + parent, + batch_size=13, + prediction_length=7, + context_length=14, + cardinality=19, + embedding_dimension=5, + num_time_features=4, + is_training=True, + hidden_size=16, + num_hidden_layers=2, + num_attention_heads=4, + intermediate_size=4, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + lags_sequence=[1, 2, 3, 4, 5], + ): + self.parent = parent + self.batch_size = batch_size + self.prediction_length = prediction_length + self.context_length = context_length + self.cardinality = cardinality + self.num_time_features = num_time_features + self.lags_sequence = lags_sequence + self.embedding_dimension = embedding_dimension + self.is_training = is_training + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + + self.encoder_seq_length = context_length + self.decoder_seq_length = prediction_length + + def get_config(self): + return TimeSeriesTransformerConfig( + encoder_layers=self.num_hidden_layers, + decoder_layers=self.num_hidden_layers, + encoder_attention_heads=self.num_attention_heads, + decoder_attention_heads=self.num_attention_heads, + encoder_ffn_dim=self.intermediate_size, + decoder_ffn_dim=self.intermediate_size, + dropout=self.hidden_dropout_prob, + attention_dropout=self.attention_probs_dropout_prob, + prediction_length=self.prediction_length, + context_length=self.context_length, + lags_sequence=self.lags_sequence, + num_time_features=self.num_time_features, + num_static_categorical_features=1, + cardinality=[self.cardinality], + embedding_dimension=[self.embedding_dimension], + ) + + def prepare_time_series_transformer_inputs_dict(self, config): + _past_length = config.context_length + max(config.lags_sequence) + + static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0]) + static_real_features = floats_tensor([self.batch_size, 1]) + + past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features]) + past_values = floats_tensor([self.batch_size, _past_length]) + past_observed_mask = floats_tensor([self.batch_size, _past_length]) + + # decoder inputs + future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features]) + future_values = floats_tensor([self.batch_size, config.prediction_length]) + + inputs_dict = { + "past_values": past_values, + "static_categorical_features": static_categorical_features, + "static_real_features": static_real_features, + "past_time_features": past_time_features, + "past_observed_mask": past_observed_mask, + "future_time_features": future_time_features, + "future_values": future_values, + } + return inputs_dict + + def prepare_config_and_inputs(self): + config = self.get_config() + inputs_dict = self.prepare_time_series_transformer_inputs_dict(config) + return config, inputs_dict + + def prepare_config_and_inputs_for_common(self): + config, inputs_dict = self.prepare_config_and_inputs() + return config, inputs_dict + + def check_encoder_decoder_model_standalone(self, config, inputs_dict): + model = TimeSeriesTransformerModel(config=config).to(torch_device).eval() + outputs = model(**inputs_dict) + + encoder_last_hidden_state = outputs.encoder_last_hidden_state + last_hidden_state = outputs.last_hidden_state + + with tempfile.TemporaryDirectory() as tmpdirname: + encoder = model.get_encoder() + encoder.save_pretrained(tmpdirname) + encoder = TimeSeriesTransformerEncoder.from_pretrained(tmpdirname).to(torch_device) + + transformer_inputs, _, _ = model.create_network_inputs(**inputs_dict) + enc_input = transformer_inputs[:, : config.context_length, ...] + dec_input = transformer_inputs[:, config.context_length :, ...] + + encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0] + + self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) + + with tempfile.TemporaryDirectory() as tmpdirname: + decoder = model.get_decoder() + decoder.save_pretrained(tmpdirname) + decoder = TimeSeriesTransformerDecoder.from_pretrained(tmpdirname).to(torch_device) + + last_hidden_state_2 = decoder( + inputs_embeds=dec_input, + encoder_hidden_states=encoder_last_hidden_state, + )[0] + + self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) + + +@require_torch +class TimeSeriesTransformerModelTest(ModelTesterMixin, unittest.TestCase): + all_model_classes = ( + (TimeSeriesTransformerModel, TimeSeriesTransformerForPrediction) if is_torch_available() else () + ) + all_generative_model_classes = (TimeSeriesTransformerForPrediction,) if is_torch_available() else () + is_encoder_decoder = True + test_pruning = False + test_head_masking = False + test_missing_keys = False + test_torchscript = False + test_inputs_embeds = False + test_model_common_attributes = False + + def setUp(self): + self.model_tester = TimeSeriesTransformerModelTester(self) + self.config_tester = ConfigTester(self, config_class=TimeSeriesTransformerConfig, has_text_modality=False) + + def test_config(self): + self.config_tester.run_common_tests() + + def test_save_load_strict(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs() + for model_class in self.all_model_classes: + model = model_class(config) + + with tempfile.TemporaryDirectory() as tmpdirname: + model.save_pretrained(tmpdirname) + model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) + self.assertEqual(info["missing_keys"], []) + + def test_encoder_decoder_model_standalone(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() + self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) + + # Ignore since we have no tokens embeddings + def test_resize_tokens_embeddings(self): + pass + + # # Input is 'static_categorical_features' not 'input_ids' + def test_model_main_input_name(self): + model_signature = inspect.signature(getattr(TimeSeriesTransformerModel, "forward")) + # The main input is the name of the argument after `self` + observed_main_input_name = list(model_signature.parameters.keys())[1] + self.assertEqual(TimeSeriesTransformerModel.main_input_name, observed_main_input_name) + + def test_forward_signature(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + signature = inspect.signature(model.forward) + # signature.parameters is an OrderedDict => so arg_names order is deterministic + arg_names = [*signature.parameters.keys()] + + expected_arg_names = [ + "past_values", + "past_time_features", + "past_observed_mask", + "static_categorical_features", + "static_real_features", + "future_values", + "future_time_features", + ] + + expected_arg_names.extend( + [ + "future_observed_mask", + "decoder_attention_mask", + "head_mask", + "decoder_head_mask", + "cross_attn_head_mask", + "encoder_outputs", + "past_key_values", + "output_hidden_states", + "output_attentions", + "use_cache", + "return_dict", + ] + if "future_observed_mask" in arg_names + else [ + "decoder_attention_mask", + "head_mask", + "decoder_head_mask", + "cross_attn_head_mask", + "encoder_outputs", + "past_key_values", + "output_hidden_states", + "output_attentions", + "use_cache", + "return_dict", + ] + ) + + self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) + + def test_attention_outputs(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + config.return_dict = True + + seq_len = getattr(self.model_tester, "seq_length", None) + decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) + encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) + + for model_class in self.all_model_classes: + inputs_dict["output_attentions"] = True + inputs_dict["output_hidden_states"] = False + config.return_dict = True + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions + self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) + + # check that output_attentions also work using config + del inputs_dict["output_attentions"] + config.output_attentions = True + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + attentions = outputs.encoder_attentions + self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) + + self.assertListEqual( + list(attentions[0].shape[-3:]), + [self.model_tester.num_attention_heads, encoder_seq_length, encoder_seq_length], + ) + out_len = len(outputs) + + correct_outlen = 6 + + if "last_hidden_state" in outputs: + correct_outlen += 1 + + if "past_key_values" in outputs: + correct_outlen += 1 # past_key_values have been returned + + if "loss" in outputs: + correct_outlen += 1 + + if "params" in outputs: + correct_outlen += 1 + + self.assertEqual(out_len, correct_outlen) + + # decoder attentions + decoder_attentions = outputs.decoder_attentions + self.assertIsInstance(decoder_attentions, (list, tuple)) + self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) + self.assertListEqual( + list(decoder_attentions[0].shape[-3:]), + [self.model_tester.num_attention_heads, decoder_seq_length, decoder_seq_length], + ) + + # cross attentions + cross_attentions = outputs.cross_attentions + self.assertIsInstance(cross_attentions, (list, tuple)) + self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) + self.assertListEqual( + list(cross_attentions[0].shape[-3:]), + [ + self.model_tester.num_attention_heads, + decoder_seq_length, + encoder_seq_length, + ], + ) + + # Check attention is always last and order is fine + inputs_dict["output_attentions"] = True + inputs_dict["output_hidden_states"] = True + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + self.assertEqual(out_len + 2, len(outputs)) + + self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions + + self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) + self.assertListEqual( + list(self_attentions[0].shape[-3:]), + [self.model_tester.num_attention_heads, encoder_seq_length, encoder_seq_length], + ) + + +def prepare_batch(filename="train-batch.pt"): + file = hf_hub_download(repo_id="kashif/tourism-monthly-batch", filename=filename, repo_type="dataset") + batch = torch.load(file, map_location=torch_device) + return batch + + +@require_torch +@slow +class TimeSeriesTransformerModelIntegrationTests(unittest.TestCase): + def test_inference_no_head(self): + model = TimeSeriesTransformerModel.from_pretrained("huggingface/time-series-transformer-tourism-monthly").to( + torch_device + ) + batch = prepare_batch() + + with torch.no_grad(): + output = model( + past_values=batch["past_values"], + past_time_features=batch["past_time_features"], + past_observed_mask=batch["past_observed_mask"], + static_categorical_features=batch["static_categorical_features"], + static_real_features=batch["static_real_features"], + future_values=batch["future_values"], + future_time_features=batch["future_time_features"], + )[0] + + expected_shape = torch.Size((64, model.config.prediction_length, model.config.d_model)) + self.assertEqual(output.shape, expected_shape) + + expected_slice = torch.tensor( + [[-0.3125, -1.2884, -1.1118], [-0.5801, -1.4907, -0.7782], [0.0849, -1.6557, -0.9755]], device=torch_device + ) + self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE)) + + def test_inference_head(self): + model = TimeSeriesTransformerForPrediction.from_pretrained( + "huggingface/time-series-transformer-tourism-monthly" + ).to(torch_device) + batch = prepare_batch("val-batch.pt") + with torch.no_grad(): + output = model( + past_values=batch["past_values"], + past_time_features=batch["past_time_features"], + past_observed_mask=batch["past_observed_mask"], + static_categorical_features=batch["static_categorical_features"], + static_real_features=batch["static_real_features"], + future_time_features=batch["future_time_features"], + )[1] + expected_shape = torch.Size((64, model.config.prediction_length, model.config.d_model)) + self.assertEqual(output.shape, expected_shape) + + expected_slice = torch.tensor( + [[0.9127, -0.2056, -0.5259], [1.0572, 1.4104, -0.1964], [0.1358, 2.0348, 0.5739]], device=torch_device + ) + self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE)) + + def test_seq_to_seq_generation(self): + model = TimeSeriesTransformerForPrediction.from_pretrained( + "huggingface/time-series-transformer-tourism-monthly" + ).to(torch_device) + batch = prepare_batch("val-batch.pt") + with torch.no_grad(): + outputs = model.generate( + static_categorical_features=batch["static_categorical_features"], + static_real_features=batch["static_real_features"], + past_time_features=batch["past_time_features"], + past_values=batch["past_values"], + future_time_features=batch["future_time_features"], + past_observed_mask=batch["past_observed_mask"], + ) + expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length)) + self.assertEqual(outputs.sequences.shape, expected_shape) + + expected_slice = torch.tensor([2289.5203, 2778.3054, 4648.1313], device=torch_device) + mean_prediction = outputs.sequences.mean(dim=1) + self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1)) diff --git a/utils/check_repo.py b/utils/check_repo.py index 5a6e4bd24347c2..fdfcb292e43428 100644 --- a/utils/check_repo.py +++ b/utils/check_repo.py @@ -46,6 +46,8 @@ # Being in this list is an exception and should **not** be the rule. IGNORE_NON_TESTED = PRIVATE_MODELS.copy() + [ # models to ignore for not tested + "TimeSeriesTransformerEncoder", # Building part of bigger (tested) model. + "TimeSeriesTransformerDecoder", # Building part of bigger (tested) model. "DeformableDetrEncoder", # Building part of bigger (tested) model. "DeformableDetrDecoder", # Building part of bigger (tested) model. "OPTDecoder", # Building part of bigger (tested) model. @@ -132,6 +134,7 @@ # should **not** be the rule. IGNORE_NON_AUTO_CONFIGURED = PRIVATE_MODELS.copy() + [ # models to ignore for model xxx mapping + "TimeSeriesTransformerForPrediction", "PegasusXEncoder", "PegasusXDecoder", "PegasusXDecoderWrapper", From 36f52e9593cce6530b04cee7a16ed84b8f424a2e Mon Sep 17 00:00:00 2001 From: Alara Dirik <8944735+alaradirik@users.noreply.github.com> Date: Mon, 3 Oct 2022 12:02:51 +0300 Subject: [PATCH 436/539] Restructure DETR post-processing, return prediction scores (#19262) * Restructure DetrFeatureExtractor post-processing methods * Update post_process_instance_segmentation and post_process_panoptic_segmentation methods to return prediction scores * Update DETR models docs --- docs/source/en/model_doc/detr.mdx | 6 +- .../models/detr/feature_extraction_detr.py | 308 +++++++++--------- src/transformers/models/detr/modeling_detr.py | 4 +- 3 files changed, 163 insertions(+), 155 deletions(-) diff --git a/docs/source/en/model_doc/detr.mdx b/docs/source/en/model_doc/detr.mdx index 9739ead3a44a5a..a6025580a6cb60 100644 --- a/docs/source/en/model_doc/detr.mdx +++ b/docs/source/en/model_doc/detr.mdx @@ -171,9 +171,9 @@ mean Average Precision (mAP) and Panoptic Quality (PQ). The latter objects are i [[autodoc]] DetrFeatureExtractor - __call__ - pad_and_create_pixel_mask - - post_process - - post_process_segmentation - - post_process_panoptic + - post_process_semantic_segmentation + - post_process_instance_segmentation + - post_process_panoptic_segmentation ## DetrModel diff --git a/src/transformers/models/detr/feature_extraction_detr.py b/src/transformers/models/detr/feature_extraction_detr.py index 3ede3662a17064..04fb123cf603d9 100644 --- a/src/transformers/models/detr/feature_extraction_detr.py +++ b/src/transformers/models/detr/feature_extraction_detr.py @@ -141,11 +141,33 @@ def binary_mask_to_rle(mask): return [x for x in runs] +def convert_segmentation_to_rle(segmentation): + """ + Converts given segmentation map of shape (height, width) to the run-length encoding (RLE) format. + + Args: + segmentation (`torch.Tensor` or `numpy.array`): + A segmentation map of shape `(height, width)` where each value denotes a segment or class id. + Returns: + `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id. + """ + segment_ids = torch.unique(segmentation) + + run_length_encodings = [] + for idx in segment_ids: + mask = torch.where(segmentation == idx, 1, 0) + rle = binary_mask_to_rle(mask) + run_length_encodings.append(rle) + + return run_length_encodings + + def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): """ + Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and + `labels`. + Args: - Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` - and `labels`. masks (`torch.Tensor`): A tensor of shape `(num_queries, height, width)`. scores (`torch.Tensor`): @@ -168,6 +190,81 @@ def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_ return masks[to_keep], scores[to_keep], labels[to_keep] +def check_segment_validity(mask_labels, mask_probs, k, overlap_mask_area_threshold=0.8): + # Get the mask associated with the k class + mask_k = mask_labels == k + mask_k_area = mask_k.sum() + + # Compute the area of all the stuff in query k + original_area = (mask_probs[k] >= 0.5).sum() + mask_exists = mask_k_area > 0 and original_area > 0 + + # Eliminate disconnected tiny segments + if mask_exists: + area_ratio = mask_k_area / original_area + if not area_ratio.item() > overlap_mask_area_threshold: + mask_exists = False + + return mask_exists, mask_k + + +def compute_segments( + mask_probs, + pred_scores, + pred_labels, + overlap_mask_area_threshold: float = 0.8, + label_ids_to_fuse: Optional[Set[int]] = None, + target_size: Tuple[int, int] = None, +): + height = mask_probs.shape[1] if target_size is None else target_size[0] + width = mask_probs.shape[2] if target_size is None else target_size[1] + + segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) + segments: List[Dict] = [] + + if target_size is not None: + mask_probs = nn.functional.interpolate( + mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False + )[0] + + current_segment_id = 0 + + # Weigh each mask by its prediction score + mask_probs *= pred_scores.view(-1, 1, 1) + mask_labels = mask_probs.argmax(0) # [height, width] + + # Keep track of instances of each class + stuff_memory_list: Dict[str, int] = {} + for k in range(pred_labels.shape[0]): + pred_class = pred_labels[k].item() + should_fuse = pred_class in label_ids_to_fuse + + # Check if mask exists and large enough to be a segment + mask_exists, mask_k = check_segment_validity(mask_labels, mask_probs, k, overlap_mask_area_threshold) + + if mask_exists: + if pred_class in stuff_memory_list: + current_segment_id = stuff_memory_list[pred_class] + else: + current_segment_id += 1 + + # Add current object segment to final segmentation map + segmentation[mask_k] = current_segment_id + segment_score = round(pred_scores[k].item(), 6) + segments.append( + { + "id": current_segment_id, + "label_id": pred_class, + "was_fused": should_fuse, + "score": segment_score, + } + ) + if should_fuse: + stuff_memory_list[pred_class] = current_segment_id + + return segmentation, segments + + class DetrFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin): r""" Constructs a DETR feature extractor. @@ -1098,7 +1195,7 @@ def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple[i semantic_segmentation = [] for idx in range(batch_size): - resized_logits = torch.nn.functional.interpolate( + resized_logits = nn.functional.interpolate( segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False ) semantic_map = resized_logits[0].argmax(dim=0) @@ -1114,31 +1211,34 @@ def post_process_instance_segmentation( outputs, threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, - target_sizes: List[Tuple] = None, + target_sizes: Optional[List[Tuple[int, int]]] = None, return_coco_annotation: Optional[bool] = False, - ): + ) -> List[Dict]: """ Args: Converts the output of [`DetrForSegmentation`] into instance segmentation predictions. Only supports PyTorch. outputs ([`DetrForSegmentation`]): Raw outputs of the model. - threshold (`float`, *optional*): - The probability score threshold to keep predicted instance masks, defaults to 0.5. - overlap_mask_area_threshold (`float`, *optional*): + threshold (`float`, *optional*, defaults to 0.5): + The probability score threshold to keep predicted instance masks. + overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary - instance mask, defaults to 0.8. - target_sizes (`List[Tuple]`, *optional*, defaults to `None`): + instance mask. + target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If left to None, predictions will not be resized. - return_coco_annotation (`bool`, *optional*, defaults to `False`): - If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format. + return_coco_annotation (`bool`, *optional*): + Defaults to `False`. If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) + format. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id` or - `List[List]` run-length encoding (RLE) of the segmentation map if return_coco_format is set to `True`. - - **segment_ids** -- A dictionary that maps segment ids to semantic class ids. + `List[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to + `True`. Set to `None` if no mask if found above `threshold`. + - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- An integer representing the `segment_id`. - - **label_id** -- An integer representing the segment's label / semantic class id. + - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. + - **score** -- Prediction score of segment with `segment_id`. """ class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] @@ -1159,76 +1259,27 @@ def post_process_instance_segmentation( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) - height, width = target_sizes[i][0], target_sizes[i][1] - segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs_item.device) - segments: List[Dict] = [] - - object_detected = mask_probs_item.shape[0] > 0 - - if object_detected: - # Resize mask to corresponding target_size - if target_sizes is not None: - mask_probs_item = torch.nn.functional.interpolate( - mask_probs_item.unsqueeze(0), - size=target_sizes[i], - mode="bilinear", - align_corners=False, - )[0] - - current_segment_id = 0 - - # Weigh each mask by its prediction score - mask_probs_item *= pred_scores_item.view(-1, 1, 1) - mask_labels_item = mask_probs_item.argmax(0) # [height, width] - - # Keep track of instances of each class - stuff_memory_list: Dict[str, int] = {} - for k in range(pred_labels_item.shape[0]): - # Get the mask associated with the k class - pred_class = pred_labels_item[k].item() - mask_k = mask_labels_item == k - mask_k_area = mask_k.sum() - - # Compute the area of all the stuff in query k - original_area = (mask_probs_item[k] >= 0.5).sum() - mask_exists = mask_k_area > 0 and original_area > 0 - - if mask_exists: - # Eliminate segments with mask area below threshold - area_ratio = mask_k_area / original_area - if not area_ratio.item() > overlap_mask_area_threshold: - continue - - # Add corresponding class id - if pred_class in stuff_memory_list: - current_segment_id = stuff_memory_list[pred_class] - else: - current_segment_id += 1 - - # Add current object segment to final segmentation map - segmentation[mask_k] = current_segment_id - segments.append( - { - "id": current_segment_id, - "label_id": pred_class, - } - ) - else: - segmentation -= 1 + # No mask found + if mask_probs_item.shape[0] <= 0: + segmentation = None + segments: List[Dict] = [] + continue + + # Get segmentation map and segment information of batch item + target_size = target_sizes[i] if target_sizes is not None else None + segmentation, segments = compute_segments( + mask_probs_item, + pred_scores_item, + pred_labels_item, + overlap_mask_area_threshold, + target_size, + ) # Return segmentation map in run-length encoding (RLE) format if return_coco_annotation: - segment_ids = torch.unique(segmentation) + segmentation = convert_segmentation_to_rle(segmentation) - run_length_encodings = [] - for idx in segment_ids: - mask = torch.where(segmentation == idx, 1, 0) - rle = binary_mask_to_rle(mask) - run_length_encodings.append(rle) - - segmentation = run_length_encodings - - results.append({"segmentation": segmentation, "segment_ids": segments}) + results.append({"segmentation": segmentation, "segments_info": segments}) return results def post_process_panoptic_segmentation( @@ -1237,7 +1288,7 @@ def post_process_panoptic_segmentation( threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, - target_sizes: List[Tuple] = None, + target_sizes: Optional[List[Tuple[int, int]]] = None, ) -> List[Dict]: """ Args: @@ -1250,7 +1301,7 @@ def post_process_panoptic_segmentation( overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. - label_ids_to_fuse (`Set[int]`, *optional*, defaults to `None`): + label_ids_to_fuse (`Set[int]`, *optional*): The labels in this state will have all their instances be fused together. For instance we could say there can only be one sky in an image, but several persons, so the label ID for sky would be in that set, but not the one for person. @@ -1260,13 +1311,15 @@ def post_process_panoptic_segmentation( resized. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`. If - `target_sizes` is specified, segmentation is resized to the corresponding `target_sizes` entry. - - **segment_ids** -- A dictionary that maps segment ids to semantic class ids. - - **id** -- An integer representing the `segment_id`. - - **label_id** -- An integer representing the segment's label / semantic class id. + - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id` or + `None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized to + the corresponding `target_sizes` entry. + - **segments_info** -- A dictionary that contains additional information on each segment. + - **id** -- an integer representing the `segment_id`. + - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. Multiple instances of the same class / label were fused and assigned a single `segment_id`. + - **score** -- Prediction score of segment with `segment_id`. """ if label_ids_to_fuse is None: @@ -1292,67 +1345,22 @@ def post_process_panoptic_segmentation( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) - height, width = target_sizes[i][0], target_sizes[i][1] - segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs_item.device) - segments: List[Dict] = [] - - object_detected = mask_probs_item.shape[0] > 0 - - if object_detected: - # Resize mask to corresponding target_size - if target_sizes is not None: - mask_probs_item = torch.nn.functional.interpolate( - mask_probs_item.unsqueeze(0), - size=target_sizes[i], - mode="bilinear", - align_corners=False, - )[0] - - current_segment_id = 0 - - # Weigh each mask by its prediction score - mask_probs_item *= pred_scores_item.view(-1, 1, 1) - mask_labels_item = mask_probs_item.argmax(0) # [height, width] - - # Keep track of instances of each class - stuff_memory_list: Dict[str, int] = {} - for k in range(pred_labels_item.shape[0]): - pred_class = pred_labels_item[k].item() - should_fuse = pred_class in label_ids_to_fuse - - # Get the mask associated with the k class - mask_k = mask_labels_item == k - mask_k_area = mask_k.sum() - - # Compute the area of all the stuff in query k - original_area = (mask_probs_item[k] >= 0.5).sum() - mask_exists = mask_k_area > 0 and original_area > 0 - - if mask_exists: - # Eliminate disconnected tiny segments - area_ratio = mask_k_area / original_area - if not area_ratio.item() > overlap_mask_area_threshold: - continue - - # Add corresponding class id - if pred_class in stuff_memory_list: - current_segment_id = stuff_memory_list[pred_class] - else: - current_segment_id += 1 - - # Add current object segment to final segmentation map - segmentation[mask_k] = current_segment_id - segments.append( - { - "id": current_segment_id, - "label_id": pred_class, - "was_fused": should_fuse, - } - ) - if should_fuse: - stuff_memory_list[pred_class] = current_segment_id - else: - segmentation -= 1 + # No mask found + if mask_probs_item.shape[0] <= 0: + segmentation = None + segments: List[Dict] = [] + continue + + # Get segmentation map and segment information of batch item + target_size = target_sizes[i] if target_sizes is not None else None + segmentation, segments = compute_segments( + mask_probs_item, + pred_scores_item, + pred_labels_item, + overlap_mask_area_threshold, + label_ids_to_fuse, + target_size, + ) - results.append({"segmentation": segmentation, "segment_ids": segments}) + results.append({"segmentation": segmentation, "segments_info": segments}) return results diff --git a/src/transformers/models/detr/modeling_detr.py b/src/transformers/models/detr/modeling_detr.py index dc5b562626588c..724c2b71a7bfd5 100644 --- a/src/transformers/models/detr/modeling_detr.py +++ b/src/transformers/models/detr/modeling_detr.py @@ -1605,12 +1605,12 @@ def forward( >>> # Use the `post_process_panoptic_segmentation` method of `DetrFeatureExtractor` to retrieve post-processed panoptic segmentation maps >>> # Segmentation results are returned as a list of dictionaries - >>> result = feature_extractor.post_process_panoptic_segmentation(outputs, processed_sizes) + >>> result = feature_extractor.post_process_panoptic_segmentation(outputs, target_size=[(300, 500)]) >>> # A tensor of shape (height, width) where each value denotes a segment id >>> panoptic_seg = result[0]["segmentation"] >>> # Get mapping of segment ids to semantic class ids - >>> panoptic_segments_info = result[0]["segment_ids"] + >>> panoptic_segments_info = result[0]["segments_info"] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict From c28d04e9e252a1a099944e325685f14d242ecdcd Mon Sep 17 00:00:00 2001 From: Divyanshu Kumar <53843818+divyanshugit@users.noreply.github.com> Date: Mon, 3 Oct 2022 18:51:51 +0530 Subject: [PATCH 437/539] Update no_trainer script for summarization (#19277) * Update no_trainer script for summarization * removed unnecessary import * fixes notation mistake * removed: unused variable --- .../summarization/run_summarization_no_trainer.py | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/examples/pytorch/summarization/run_summarization_no_trainer.py b/examples/pytorch/summarization/run_summarization_no_trainer.py index eb809ada9915db..594d468330e78a 100644 --- a/examples/pytorch/summarization/run_summarization_no_trainer.py +++ b/examples/pytorch/summarization/run_summarization_no_trainer.py @@ -669,7 +669,6 @@ def postprocess_text(preds, labels): "max_length": args.val_max_target_length if args is not None else config.max_length, "num_beams": args.num_beams, } - samples_seen = 0 for step, batch in enumerate(eval_dataloader): with torch.no_grad(): generated_tokens = accelerator.unwrap_model(model).generate( @@ -686,7 +685,7 @@ def postprocess_text(preds, labels): # If we did not pad to max length, we need to pad the labels too labels = accelerator.pad_across_processes(batch["labels"], dim=1, pad_index=tokenizer.pad_token_id) - generated_tokens, labels = accelerator.gather((generated_tokens, labels)) + generated_tokens, labels = accelerator.gather_for_metrics(generated_tokens, labels) generated_tokens = generated_tokens.cpu().numpy() labels = labels.cpu().numpy() @@ -699,14 +698,8 @@ def postprocess_text(preds, labels): decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) - # If we are in a multiprocess environment, the last batch has duplicates - if accelerator.num_processes > 1: - if step == len(eval_dataloader) - 1: - decoded_preds = decoded_preds[: len(eval_dataloader.dataset) - samples_seen] - decoded_labels = decoded_labels[: len(eval_dataloader.dataset) - samples_seen] - else: - samples_seen += len(decoded_labels) + decoded_preds, decoded_labels = accelerator.gather_for_metrics(decoded_preds, decoded_labels) metric.add_batch( predictions=decoded_preds, references=decoded_labels, From 18c06208c4fa6868f226ad82d1e58f1ce520fe8f Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Mon, 3 Oct 2022 12:42:04 -0400 Subject: [PATCH 438/539] Don't automatically add bug label (#19302) --- .github/ISSUE_TEMPLATE/bug-report.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index b1d52c8a3cd602..31330fdf3a6484 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -1,6 +1,5 @@ name: "\U0001F41B Bug Report" description: Submit a bug report to help us improve transformers -labels: [ "bug" ] body: - type: textarea id: system-info From 68f50f3453e72719b830342f2704410f8f3334e4 Mon Sep 17 00:00:00 2001 From: Steven Liu <59462357+stevhliu@users.noreply.github.com> Date: Mon, 3 Oct 2022 13:18:29 -0700 Subject: [PATCH 439/539] Breakup export guide (#19271) * split onnx and torchscript docs * make style * apply reviews --- docs/source/en/_toctree.yml | 4 +- docs/source/en/serialization.mdx | 445 +++++++++---------------------- docs/source/en/torchscript.mdx | 225 ++++++++++++++++ 3 files changed, 347 insertions(+), 327 deletions(-) create mode 100644 docs/source/en/torchscript.mdx diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 3ce68ad8598a1e..63a148bd3cc941 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -33,7 +33,9 @@ - local: converting_tensorflow_models title: Converting from TensorFlow checkpoints - local: serialization - title: Export 🤗 Transformers models + title: Export to ONNX + - local: torchscript + title: Export to TorchScript - local: troubleshooting title: Troubleshoot title: General usage diff --git a/docs/source/en/serialization.mdx b/docs/source/en/serialization.mdx index a1577447f7235b..903d35da4c4cd3 100644 --- a/docs/source/en/serialization.mdx +++ b/docs/source/en/serialization.mdx @@ -10,36 +10,36 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o specific language governing permissions and limitations under the License. --> -# Export 🤗 Transformers Models +# Export to ONNX -If you need to deploy 🤗 Transformers models in production environments, we -recommend exporting them to a serialized format that can be loaded and executed -on specialized runtimes and hardware. In this guide, we'll show you how to -export 🤗 Transformers models in two widely used formats: ONNX and TorchScript. +If you need to deploy 🤗 Transformers models in production environments, we recommend +exporting them to a serialized format that can be loaded and executed on specialized +runtimes and hardware. In this guide, we'll show you how to export 🤗 Transformers +models to [ONNX (Open Neural Network eXchange)](http://onnx.ai). -Once exported, a model can optimized for inference via techniques such as -quantization and pruning. If you are interested in optimizing your models to run -with maximum efficiency, check out the [🤗 Optimum + + +Once exported, a model can be optimized for inference via techniques such as +quantization and pruning. If you are interested in optimizing your models to run with +maximum efficiency, check out the [🤗 Optimum library](https://github.com/huggingface/optimum). -## ONNX + -The [ONNX (Open Neural Network eXchange)](http://onnx.ai) project is an open -standard that defines a common set of operators and a common file format to -represent deep learning models in a wide variety of frameworks, including -PyTorch and TensorFlow. When a model is exported to the ONNX format, these -operators are used to construct a computational graph (often called an -_intermediate representation_) which represents the flow of data through the -neural network. +ONNX is an open standard that defines a common set of operators and a common file format +to represent deep learning models in a wide variety of frameworks, including PyTorch and +TensorFlow. When a model is exported to the ONNX format, these operators are used to +construct a computational graph (often called an _intermediate representation_) which +represents the flow of data through the neural network. -By exposing a graph with standardized operators and data types, ONNX makes it -easy to switch between frameworks. For example, a model trained in PyTorch can -be exported to ONNX format and then imported in TensorFlow (and vice versa). +By exposing a graph with standardized operators and data types, ONNX makes it easy to +switch between frameworks. For example, a model trained in PyTorch can be exported to +ONNX format and then imported in TensorFlow (and vice versa). -🤗 Transformers provides a `transformers.onnx` package that enables you to -convert model checkpoints to an ONNX graph by leveraging configuration objects. -These configuration objects come ready made for a number of model architectures, -and are designed to be easily extendable to other architectures. +🤗 Transformers provides a [`transformers.onnx`](main_classes/onnx) package that enables +you to convert model checkpoints to an ONNX graph by leveraging configuration objects. +These configuration objects come ready made for a number of model architectures, and are +designed to be easily extendable to other architectures. Ready-made configurations include the following architectures: @@ -106,10 +106,10 @@ In the next two sections, we'll show you how to: * Export a supported model using the `transformers.onnx` package. * Export a custom model for an unsupported architecture. -### Exporting a model to ONNX +## Exporting a model to ONNX -To export a 🤗 Transformers model to ONNX, you'll first need to install some -extra dependencies: +To export a 🤗 Transformers model to ONNX, you'll first need to install some extra +dependencies: ```bash pip install transformers[onnx] @@ -141,7 +141,7 @@ Exporting a checkpoint using a ready-made configuration can be done as follows: python -m transformers.onnx --model=distilbert-base-uncased onnx/ ``` -which should show the following logs: +You should see the following logs: ```bash Validating ONNX model... @@ -152,13 +152,13 @@ Validating ONNX model... All good, model saved at: onnx/model.onnx ``` -This exports an ONNX graph of the checkpoint defined by the `--model` argument. -In this example it is `distilbert-base-uncased`, but it can be any checkpoint on -the Hugging Face Hub or one that's stored locally. +This exports an ONNX graph of the checkpoint defined by the `--model` argument. In this +example, it is `distilbert-base-uncased`, but it can be any checkpoint on the Hugging +Face Hub or one that's stored locally. The resulting `model.onnx` file can then be run on one of the [many -accelerators](https://onnx.ai/supported-tools.html#deployModel) that support the -ONNX standard. For example, we can load and run the model with [ONNX +accelerators](https://onnx.ai/supported-tools.html#deployModel) that support the ONNX +standard. For example, we can load and run the model with [ONNX Runtime](https://onnxruntime.ai/) as follows: ```python @@ -172,9 +172,8 @@ Runtime](https://onnxruntime.ai/) as follows: >>> outputs = session.run(output_names=["last_hidden_state"], input_feed=dict(inputs)) ``` -The required output names (i.e. `["last_hidden_state"]`) can be obtained by -taking a look at the ONNX configuration of each model. For example, for -DistilBERT we have: +The required output names (like `["last_hidden_state"]`) can be obtained by taking a +look at the ONNX configuration of each model. For example, for DistilBERT we have: ```python >>> from transformers.models.distilbert import DistilBertConfig, DistilBertOnnxConfig @@ -185,20 +184,19 @@ DistilBERT we have: ["last_hidden_state"] ``` -The process is identical for TensorFlow checkpoints on the Hub. For example, we -can export a pure TensorFlow checkpoint from the [Keras +The process is identical for TensorFlow checkpoints on the Hub. For example, we can +export a pure TensorFlow checkpoint from the [Keras organization](https://huggingface.co/keras-io) as follows: ```bash python -m transformers.onnx --model=keras-io/transformers-qa onnx/ ``` -To export a model that's stored locally, you'll need to have the model's weights -and tokenizer files stored in a directory. For example, we can load and save a -checkpoint as follows: +To export a model that's stored locally, you'll need to have the model's weights and +tokenizer files stored in a directory. For example, we can load and save a checkpoint as +follows: - - + ```python >>> from transformers import AutoTokenizer, AutoModelForSequenceClassification @@ -216,8 +214,7 @@ argument of the `transformers.onnx` package to the desired directory: ```bash python -m transformers.onnx --model=local-pt-checkpoint onnx/ ``` - - + ```python >>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification @@ -235,14 +232,13 @@ argument of the `transformers.onnx` package to the desired directory: ```bash python -m transformers.onnx --model=local-tf-checkpoint onnx/ ``` - - + -### Selecting features for different model topologies +## Selecting features for different model tasks -Each ready-made configuration comes with a set of _features_ that enable you to -export models for different types of topologies or tasks. As shown in the table -below, each feature is associated with a different auto class: +Each ready-made configuration comes with a set of _features_ that enable you to export +models for different types of tasks. As shown in the table below, each feature is +associated with a different `AutoClass`: | Feature | Auto Class | | ------------------------------------ | ------------------------------------ | @@ -255,7 +251,7 @@ below, each feature is associated with a different auto class: | `token-classification` | `AutoModelForTokenClassification` | For each configuration, you can find the list of supported features via the -`FeaturesManager`. For example, for DistilBERT we have: +[`~transformers.onnx.FeaturesManager`]. For example, for DistilBERT we have: ```python >>> from transformers.onnx.features import FeaturesManager @@ -266,15 +262,15 @@ For each configuration, you can find the list of supported features via the ``` You can then pass one of these features to the `--feature` argument in the -`transformers.onnx` package. For example, to export a text-classification model -we can pick a fine-tuned model from the Hub and run: +`transformers.onnx` package. For example, to export a text-classification model we can +pick a fine-tuned model from the Hub and run: ```bash python -m transformers.onnx --model=distilbert-base-uncased-finetuned-sst-2-english \ --feature=sequence-classification onnx/ ``` -which will display the following logs: +This displays the following logs: ```bash Validating ONNX model... @@ -285,37 +281,35 @@ Validating ONNX model... All good, model saved at: onnx/model.onnx ``` -Notice that in this case, the output names from the fine-tuned model are -`logits` instead of the `last_hidden_state` we saw with the -`distilbert-base-uncased` checkpoint earlier. This is expected since the -fine-tuned model has a sequence classification head. +Notice that in this case, the output names from the fine-tuned model are `logits` +instead of the `last_hidden_state` we saw with the `distilbert-base-uncased` checkpoint +earlier. This is expected since the fine-tuned model has a sequence classification head. -The features that have a `with-past` suffix (e.g. `causal-lm-with-past`) -correspond to model topologies with precomputed hidden states (key and values -in the attention blocks) that can be used for fast autoregressive decoding. +The features that have a `with-past` suffix (like `causal-lm-with-past`) correspond to +model classes with precomputed hidden states (key and values in the attention blocks) +that can be used for fast autoregressive decoding. -### Exporting a model for an unsupported architecture +## Exporting a model for an unsupported architecture -If you wish to export a model whose architecture is not natively supported by -the library, there are three main steps to follow: +If you wish to export a model whose architecture is not natively supported by the +library, there are three main steps to follow: 1. Implement a custom ONNX configuration. 2. Export the model to ONNX. 3. Validate the outputs of the PyTorch and exported models. -In this section, we'll look at how DistilBERT was implemented to show what's -involved with each step. +In this section, we'll look at how DistilBERT was implemented to show what's involved +with each step. -#### Implementing a custom ONNX configuration +### Implementing a custom ONNX configuration -Let's start with the ONNX configuration object. We provide three abstract -classes that you should inherit from, depending on the type of model -architecture you wish to export: +Let's start with the ONNX configuration object. We provide three abstract classes that +you should inherit from, depending on the type of model architecture you wish to export: * Encoder-based models inherit from [`~onnx.config.OnnxConfig`] * Decoder-based models inherit from [`~onnx.config.OnnxConfigWithPast`] @@ -347,25 +341,24 @@ Since DistilBERT is an encoder-based model, its configuration inherits from ... ) ``` -Every configuration object must implement the `inputs` property and return a -mapping, where each key corresponds to an expected input, and each value -indicates the axis of that input. For DistilBERT, we can see that two inputs are -required: `input_ids` and `attention_mask`. These inputs have the same shape of -`(batch_size, sequence_length)` which is why we see the same axes used in the -configuration. +Every configuration object must implement the `inputs` property and return a mapping, +where each key corresponds to an expected input, and each value indicates the axis of +that input. For DistilBERT, we can see that two inputs are required: `input_ids` and +`attention_mask`. These inputs have the same shape of `(batch_size, sequence_length)` +which is why we see the same axes used in the configuration. -Notice that `inputs` property for `DistilBertOnnxConfig` returns an -`OrderedDict`. This ensures that the inputs are matched with their relative -position within the `PreTrainedModel.forward()` method when tracing the graph. -We recommend using an `OrderedDict` for the `inputs` and `outputs` properties -when implementing custom ONNX configurations. +Notice that `inputs` property for `DistilBertOnnxConfig` returns an `OrderedDict`. This +ensures that the inputs are matched with their relative position within the +`PreTrainedModel.forward()` method when tracing the graph. We recommend using an +`OrderedDict` for the `inputs` and `outputs` properties when implementing custom ONNX +configurations. -Once you have implemented an ONNX configuration, you can instantiate it by -providing the base model's configuration as follows: +Once you have implemented an ONNX configuration, you can instantiate it by providing the +base model's configuration as follows: ```python >>> from transformers import AutoConfig @@ -374,8 +367,8 @@ providing the base model's configuration as follows: >>> onnx_config = DistilBertOnnxConfig(config) ``` -The resulting object has several useful properties. For example you can view the -ONNX operator set that will be used during the export: +The resulting object has several useful properties. For example, you can view the ONNX +operator set that will be used during the export: ```python >>> print(onnx_config.default_onnx_opset) @@ -389,15 +382,14 @@ You can also view the outputs associated with the model as follows: OrderedDict([("last_hidden_state", {0: "batch", 1: "sequence"})]) ``` -Notice that the outputs property follows the same structure as the inputs; it -returns an `OrderedDict` of named outputs and their shapes. The output structure -is linked to the choice of feature that the configuration is initialised with. -By default, the ONNX configuration is initialized with the `default` feature -that corresponds to exporting a model loaded with the `AutoModel` class. If you -want to export a different model topology, just provide a different feature to -the `task` argument when you initialize the ONNX configuration. For example, if -we wished to export DistilBERT with a sequence classification head, we could -use: +Notice that the outputs property follows the same structure as the inputs; it returns an +`OrderedDict` of named outputs and their shapes. The output structure is linked to the +choice of feature that the configuration is initialised with. By default, the ONNX +configuration is initialized with the `default` feature that corresponds to exporting a +model loaded with the `AutoModel` class. If you want to export a model for another task, +just provide a different feature to the `task` argument when you initialize the ONNX +configuration. For example, if we wished to export DistilBERT with a sequence +classification head, we could use: ```python >>> from transformers import AutoConfig @@ -410,18 +402,18 @@ OrderedDict([('logits', {0: 'batch'})]) -All of the base properties and methods associated with [`~onnx.config.OnnxConfig`] and the -other configuration classes can be overriden if needed. Check out -[`BartOnnxConfig`] for an advanced example. +All of the base properties and methods associated with [`~onnx.config.OnnxConfig`] and +the other configuration classes can be overriden if needed. Check out [`BartOnnxConfig`] +for an advanced example. -#### Exporting the model +### Exporting the model -Once you have implemented the ONNX configuration, the next step is to export the -model. Here we can use the `export()` function provided by the -`transformers.onnx` package. This function expects the ONNX configuration, along -with the base model and tokenizer, and the path to save the exported file: +Once you have implemented the ONNX configuration, the next step is to export the model. +Here we can use the `export()` function provided by the `transformers.onnx` package. +This function expects the ONNX configuration, along with the base model and tokenizer, +and the path to save the exported file: ```python >>> from pathlib import Path @@ -436,10 +428,9 @@ with the base model and tokenizer, and the path to save the exported file: >>> onnx_inputs, onnx_outputs = export(tokenizer, base_model, onnx_config, onnx_config.default_onnx_opset, onnx_path) ``` -The `onnx_inputs` and `onnx_outputs` returned by the `export()` function are -lists of the keys defined in the `inputs` and `outputs` properties of the -configuration. Once the model is exported, you can test that the model is well -formed as follows: +The `onnx_inputs` and `onnx_outputs` returned by the `export()` function are lists of +the keys defined in the `inputs` and `outputs` properties of the configuration. Once the +model is exported, you can test that the model is well formed as follows: ```python >>> import onnx @@ -450,21 +441,20 @@ formed as follows: -If your model is larger than 2GB, you will see that many additional files are -created during the export. This is _expected_ because ONNX uses [Protocol -Buffers](https://developers.google.com/protocol-buffers/) to store the model and -these have a size limit of 2GB. See the [ONNX -documentation](https://github.com/onnx/onnx/blob/master/docs/ExternalData.md) -for instructions on how to load models with external data. +If your model is larger than 2GB, you will see that many additional files are created +during the export. This is _expected_ because ONNX uses [Protocol +Buffers](https://developers.google.com/protocol-buffers/) to store the model and these +have a size limit of 2GB. See the [ONNX +documentation](https://github.com/onnx/onnx/blob/master/docs/ExternalData.md) for +instructions on how to load models with external data. -#### Validating the model outputs +### Validating the model outputs -The final step is to validate that the outputs from the base and exported model -agree within some absolute tolerance. Here we can use the -`validate_model_outputs()` function provided by the `transformers.onnx` package -as follows: +The final step is to validate that the outputs from the base and exported model agree +within some absolute tolerance. Here we can use the `validate_model_outputs()` function +provided by the `transformers.onnx` package as follows: ```python >>> from transformers.onnx import validate_model_outputs @@ -474,220 +464,23 @@ as follows: ... ) ``` -This function uses the `OnnxConfig.generate_dummy_inputs()` method to generate -inputs for the base and exported model, and the absolute tolerance can be -defined in the configuration. We generally find numerical agreement in the 1e-6 -to 1e-4 range, although anything smaller than 1e-3 is likely to be OK. +This function uses the [`~transformers.onnx.OnnxConfig.generate_dummy_inputs`] method to +generate inputs for the base and exported model, and the absolute tolerance can be +defined in the configuration. We generally find numerical agreement in the 1e-6 to 1e-4 +range, although anything smaller than 1e-3 is likely to be OK. -### Contributing a new configuration to 🤗 Transformers +## Contributing a new configuration to 🤗 Transformers -We are looking to expand the set of ready-made configurations and welcome -contributions from the community! If you would like to contribute your addition -to the library, you will need to: +We are looking to expand the set of ready-made configurations and welcome contributions +from the community! If you would like to contribute your addition to the library, you +will need to: * Implement the ONNX configuration in the corresponding `configuration_.py` file -* Include the model architecture and corresponding features in [`~onnx.features.FeatureManager`] +* Include the model architecture and corresponding features in + [`~onnx.features.FeatureManager`] * Add your model architecture to the tests in `test_onnx_v2.py` Check out how the configuration for [IBERT was -contributed](https://github.com/huggingface/transformers/pull/14868/files) to -get an idea of what's involved. - -## TorchScript - - - -This is the very beginning of our experiments with TorchScript and we are still exploring its capabilities with -variable-input-size models. It is a focus of interest to us and we will deepen our analysis in upcoming releases, -with more code examples, a more flexible implementation, and benchmarks comparing python-based codes with compiled -TorchScript. - - - -According to Pytorch's documentation: "TorchScript is a way to create serializable and optimizable models from PyTorch -code". Pytorch's two modules [JIT and TRACE](https://pytorch.org/docs/stable/jit.html) allow the developer to export -their model to be re-used in other programs, such as efficiency-oriented C++ programs. - -We have provided an interface that allows the export of 🤗 Transformers models to TorchScript so that they can be reused -in a different environment than a Pytorch-based python program. Here we explain how to export and use our models using -TorchScript. - -Exporting a model requires two things: - -- a forward pass with dummy inputs. -- model instantiation with the `torchscript` flag. - -These necessities imply several things developers should be careful about. These are detailed below. - -### TorchScript flag and tied weights - -This flag is necessary because most of the language models in this repository have tied weights between their -`Embedding` layer and their `Decoding` layer. TorchScript does not allow the export of models that have tied -weights, therefore it is necessary to untie and clone the weights beforehand. - -This implies that models instantiated with the `torchscript` flag have their `Embedding` layer and `Decoding` -layer separate, which means that they should not be trained down the line. Training would de-synchronize the two -layers, leading to unexpected results. - -This is not the case for models that do not have a Language Model head, as those do not have tied weights. These models -can be safely exported without the `torchscript` flag. - -### Dummy inputs and standard lengths - -The dummy inputs are used to do a model forward pass. While the inputs' values are propagating through the layers, -Pytorch keeps track of the different operations executed on each tensor. These recorded operations are then used to -create the "trace" of the model. - -The trace is created relatively to the inputs' dimensions. It is therefore constrained by the dimensions of the dummy -input, and will not work for any other sequence length or batch size. When trying with a different size, an error such -as: - -`The expanded size of the tensor (3) must match the existing size (7) at non-singleton dimension 2` - -will be raised. It is therefore recommended to trace the model with a dummy input size at least as large as the largest -input that will be fed to the model during inference. Padding can be performed to fill the missing values. As the model -will have been traced with a large input size however, the dimensions of the different matrix will be large as well, -resulting in more calculations. - -It is recommended to be careful of the total number of operations done on each input and to follow performance closely -when exporting varying sequence-length models. - -### Using TorchScript in Python - -Below is an example, showing how to save, load models as well as how to use the trace for inference. - -#### Saving a model - -This snippet shows how to use TorchScript to export a `BertModel`. Here the `BertModel` is instantiated according -to a `BertConfig` class and then saved to disk under the filename `traced_bert.pt` - -```python -from transformers import BertModel, BertTokenizer, BertConfig -import torch - -enc = BertTokenizer.from_pretrained("bert-base-uncased") - -# Tokenizing input text -text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]" -tokenized_text = enc.tokenize(text) - -# Masking one of the input tokens -masked_index = 8 -tokenized_text[masked_index] = "[MASK]" -indexed_tokens = enc.convert_tokens_to_ids(tokenized_text) -segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1] - -# Creating a dummy input -tokens_tensor = torch.tensor([indexed_tokens]) -segments_tensors = torch.tensor([segments_ids]) -dummy_input = [tokens_tensor, segments_tensors] - -# Initializing the model with the torchscript flag -# Flag set to True even though it is not necessary as this model does not have an LM Head. -config = BertConfig( - vocab_size_or_config_json_file=32000, - hidden_size=768, - num_hidden_layers=12, - num_attention_heads=12, - intermediate_size=3072, - torchscript=True, -) - -# Instantiating the model -model = BertModel(config) - -# The model needs to be in evaluation mode -model.eval() - -# If you are instantiating the model with *from_pretrained* you can also easily set the TorchScript flag -model = BertModel.from_pretrained("bert-base-uncased", torchscript=True) - -# Creating the trace -traced_model = torch.jit.trace(model, [tokens_tensor, segments_tensors]) -torch.jit.save(traced_model, "traced_bert.pt") -``` - -#### Loading a model - -This snippet shows how to load the `BertModel` that was previously saved to disk under the name `traced_bert.pt`. -We are re-using the previously initialised `dummy_input`. - -```python -loaded_model = torch.jit.load("traced_bert.pt") -loaded_model.eval() - -all_encoder_layers, pooled_output = loaded_model(*dummy_input) -``` - -#### Using a traced model for inference - -Using the traced model for inference is as simple as using its `__call__` dunder method: - -```python -traced_model(tokens_tensor, segments_tensors) -``` - -### Deploying HuggingFace TorchScript models on AWS using the Neuron SDK - -AWS introduced the [Amazon EC2 Inf1](https://aws.amazon.com/ec2/instance-types/inf1/) -instance family for low cost, high performance machine learning inference in the cloud. -The Inf1 instances are powered by the AWS Inferentia chip, a custom-built hardware accelerator, -specializing in deep learning inferencing workloads. -[AWS Neuron](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/#) -is the SDK for Inferentia that supports tracing and optimizing transformers models for -deployment on Inf1. The Neuron SDK provides: - - -1. Easy-to-use API with one line of code change to trace and optimize a TorchScript model for inference in the cloud. -2. Out of the box performance optimizations for [improved cost-performance](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/benchmark/>) -3. Support for HuggingFace transformers models built with either [PyTorch](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/pytorch/bert_tutorial/tutorial_pretrained_bert.html) - or [TensorFlow](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/tensorflow/huggingface_bert/huggingface_bert.html). - -#### Implications - -Transformers Models based on the [BERT (Bidirectional Encoder Representations from Transformers)](https://huggingface.co/docs/transformers/main/model_doc/bert) -architecture, or its variants such as [distilBERT](https://huggingface.co/docs/transformers/main/model_doc/distilbert) - and [roBERTa](https://huggingface.co/docs/transformers/main/model_doc/roberta) - will run best on Inf1 for non-generative tasks such as Extractive Question Answering, - Sequence Classification, Token Classification. Alternatively, text generation -tasks can be adapted to run on Inf1, according to this [AWS Neuron MarianMT tutorial](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/pytorch/transformers-marianmt.html). -More information about models that can be converted out of the box on Inferentia can be -found in the [Model Architecture Fit section of the Neuron documentation](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/models/models-inferentia.html#models-inferentia). - -#### Dependencies - -Using AWS Neuron to convert models requires the following dependencies and environment: - -* A [Neuron SDK environment](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/neuron-frameworks/pytorch-neuron/index.html#installation-guide), - which comes pre-configured on [AWS Deep Learning AMI](https://docs.aws.amazon.com/dlami/latest/devguide/tutorial-inferentia-launching.html). - -#### Converting a Model for AWS Neuron - -Using the same script as in [Using TorchScript in Python](https://huggingface.co/docs/transformers/main/en/serialization#using-torchscript-in-python) -to trace a "BertModel", you import `torch.neuron` framework extension to access -the components of the Neuron SDK through a Python API. - -```python -from transformers import BertModel, BertTokenizer, BertConfig -import torch -import torch.neuron -``` -And only modify the tracing line of code - -from: - -```python -torch.jit.trace(model, [tokens_tensor, segments_tensors]) -``` - -to: - -```python -torch.neuron.trace(model, [token_tensor, segments_tensors]) -``` - -This change enables Neuron SDK to trace the model and optimize it to run in Inf1 instances. - -To learn more about AWS Neuron SDK features, tools, example tutorials and latest updates, -please see the [AWS NeuronSDK documentation](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/index.html). +contributed](https://github.com/huggingface/transformers/pull/14868/files) to get an +idea of what's involved. \ No newline at end of file diff --git a/docs/source/en/torchscript.mdx b/docs/source/en/torchscript.mdx new file mode 100644 index 00000000000000..0840973ad078ab --- /dev/null +++ b/docs/source/en/torchscript.mdx @@ -0,0 +1,225 @@ + + +# Export to TorchScript + + + +This is the very beginning of our experiments with TorchScript and we are still +exploring its capabilities with variable-input-size models. It is a focus of interest to +us and we will deepen our analysis in upcoming releases, with more code examples, a more +flexible implementation, and benchmarks comparing Python-based codes with compiled +TorchScript. + + + +According to the [TorchScript documentation](https://pytorch.org/docs/stable/jit.html): + +> TorchScript is a way to create serializable and optimizable models from PyTorch code. + +There are two PyTorch modules, [JIT and +TRACE](https://pytorch.org/docs/stable/jit.html), that allow developers to export their +models to be reused in other programs like efficiency-oriented C++ programs. + +We provide an interface that allows you to export 🤗 Transformers models to TorchScript +so they can be reused in a different environment than PyTorch-based Python programs. +Here, we explain how to export and use our models using TorchScript. + +Exporting a model requires two things: + +- model instantiation with the `torchscript` flag +- a forward pass with dummy inputs + +These necessities imply several things developers should be careful about as detailed +below. + +## TorchScript flag and tied weights + +The `torchscript` flag is necessary because most of the 🤗 Transformers language models +have tied weights between their `Embedding` layer and their `Decoding` layer. +TorchScript does not allow you to export models that have tied weights, so it is +necessary to untie and clone the weights beforehand. + +Models instantiated with the `torchscript` flag have their `Embedding` layer and +`Decoding` layer separated, which means that they should not be trained down the line. +Training would desynchronize the two layers, leading to unexpected results. + +This is not the case for models that do not have a language model head, as those do not +have tied weights. These models can be safely exported without the `torchscript` flag. + +## Dummy inputs and standard lengths + +The dummy inputs are used for a models forward pass. While the inputs' values are +propagated through the layers, PyTorch keeps track of the different operations executed +on each tensor. These recorded operations are then used to create the *trace* of the +model. + +The trace is created relative to the inputs' dimensions. It is therefore constrained by +the dimensions of the dummy input, and will not work for any other sequence length or +batch size. When trying with a different size, the following error is raised: + +``` +`The expanded size of the tensor (3) must match the existing size (7) at non-singleton dimension 2` +``` + +We recommended you trace the model with a dummy input size at least as large as the +largest input that will be fed to the model during inference. Padding can help fill the +missing values. However, since the model is traced with a larger input size, the +dimensions of the matrix will also be large, resulting in more calculations. + +Be careful of the total number of operations done on each input and follow the +performance closely when exporting varying sequence-length models. + +## Using TorchScript in Python + +This section demonstrates how to save and load models as well as how to use the trace +for inference. + +### Saving a model + +To export a `BertModel` with TorchScript, instantiate `BertModel` from the `BertConfig` +class and then save it to disk under the filename `traced_bert.pt`: + +```python +from transformers import BertModel, BertTokenizer, BertConfig +import torch + +enc = BertTokenizer.from_pretrained("bert-base-uncased") + +# Tokenizing input text +text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]" +tokenized_text = enc.tokenize(text) + +# Masking one of the input tokens +masked_index = 8 +tokenized_text[masked_index] = "[MASK]" +indexed_tokens = enc.convert_tokens_to_ids(tokenized_text) +segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1] + +# Creating a dummy input +tokens_tensor = torch.tensor([indexed_tokens]) +segments_tensors = torch.tensor([segments_ids]) +dummy_input = [tokens_tensor, segments_tensors] + +# Initializing the model with the torchscript flag +# Flag set to True even though it is not necessary as this model does not have an LM Head. +config = BertConfig( + vocab_size_or_config_json_file=32000, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + torchscript=True, +) + +# Instantiating the model +model = BertModel(config) + +# The model needs to be in evaluation mode +model.eval() + +# If you are instantiating the model with *from_pretrained* you can also easily set the TorchScript flag +model = BertModel.from_pretrained("bert-base-uncased", torchscript=True) + +# Creating the trace +traced_model = torch.jit.trace(model, [tokens_tensor, segments_tensors]) +torch.jit.save(traced_model, "traced_bert.pt") +``` + +### Loading a model + +Now you can load the previously saved `BertModel`, `traced_bert.pt`, from disk and use +it on the previously initialised `dummy_input`: + +```python +loaded_model = torch.jit.load("traced_bert.pt") +loaded_model.eval() + +all_encoder_layers, pooled_output = loaded_model(*dummy_input) +``` + +### Using a traced model for inference + +Use the traced model for inference by using its `__call__` dunder method: + +```python +traced_model(tokens_tensor, segments_tensors) +``` + +## Deploy Hugging Face TorchScript models to AWS with the Neuron SDK + +AWS introduced the [Amazon EC2 Inf1](https://aws.amazon.com/ec2/instance-types/inf1/) +instance family for low cost, high performance machine learning inference in the cloud. +The Inf1 instances are powered by the AWS Inferentia chip, a custom-built hardware +accelerator, specializing in deep learning inferencing workloads. [AWS +Neuron](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/#) is the SDK for +Inferentia that supports tracing and optimizing transformers models for deployment on +Inf1. The Neuron SDK provides: + + +1. Easy-to-use API with one line of code change to trace and optimize a TorchScript + model for inference in the cloud. +2. Out of the box performance optimizations for [improved + cost-performance](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/benchmark/>). +3. Support for Hugging Face transformers models built with either + [PyTorch](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/pytorch/bert_tutorial/tutorial_pretrained_bert.html) + or + [TensorFlow](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/tensorflow/huggingface_bert/huggingface_bert.html). + +### Implications + +Transformers models based on the [BERT (Bidirectional Encoder Representations from +Transformers)](https://huggingface.co/docs/transformers/main/model_doc/bert) +architecture, or its variants such as +[distilBERT](https://huggingface.co/docs/transformers/main/model_doc/distilbert) and +[roBERTa](https://huggingface.co/docs/transformers/main/model_doc/roberta) run best on +Inf1 for non-generative tasks such as extractive question answering, sequence +classification, and token classification. However, text generation tasks can still be +adapted to run on Inf1 according to this [AWS Neuron MarianMT +tutorial](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/pytorch/transformers-marianmt.html). +More information about models that can be converted out of the box on Inferentia can be +found in the [Model Architecture +Fit](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/models/models-inferentia.html#models-inferentia) +section of the Neuron documentation. + +### Dependencies + +Using AWS Neuron to convert models requires a [Neuron SDK +environment](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/neuron-frameworks/pytorch-neuron/index.html#installation-guide) +which comes preconfigured on [AWS Deep Learning +AMI](https://docs.aws.amazon.com/dlami/latest/devguide/tutorial-inferentia-launching.html). + +### Converting a model for AWS Neuron + +Convert a model for AWS NEURON using the same code from [Using TorchScript in +Python](serialization#using-torchscript-in-python) to trace a `BertModel`. Import the +`torch.neuron` framework extension to access the components of the Neuron SDK through a +Python API: + +```python +from transformers import BertModel, BertTokenizer, BertConfig +import torch +import torch.neuron +``` + +You only need to modify the following line: + +```diff +- torch.jit.trace(model, [tokens_tensor, segments_tensors]) ++ torch.neuron.trace(model, [token_tensor, segments_tensors]) +``` + +This enables the Neuron SDK to trace the model and optimize it for Inf1 instances. + +To learn more about AWS Neuron SDK features, tools, example tutorials and latest +updates, please see the [AWS NeuronSDK +documentation](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/index.html). From 008531c18a4d5c538510dc5569832d125e480de6 Mon Sep 17 00:00:00 2001 From: Andrew Sansom Date: Mon, 3 Oct 2022 16:37:09 -0500 Subject: [PATCH 440/539] Update Protobuf dependency version to fix known vulnerability (#19247) * Update protobuf dependency to fix vulnerability * Update `dependency_versions_table.py` to include updated protobuf. --- setup.py | 2 +- src/transformers/dependency_versions_table.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index b74a5c76581a90..b82de85b9b2cd1 100644 --- a/setup.py +++ b/setup.py @@ -133,7 +133,7 @@ "packaging>=20.0", "parameterized", "phonemizer", - "protobuf<=3.20.1", + "protobuf<=3.20.2", "psutil", "pyyaml>=5.1", "pydantic", diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py index 89522d9c6ac4ad..d8d2fce767a570 100644 --- a/src/transformers/dependency_versions_table.py +++ b/src/transformers/dependency_versions_table.py @@ -39,7 +39,7 @@ "packaging": "packaging>=20.0", "parameterized": "parameterized", "phonemizer": "phonemizer", - "protobuf": "protobuf<=3.20.1", + "protobuf": "protobuf<=3.20.2", "psutil": "psutil", "pyyaml": "pyyaml>=5.1", "pydantic": "pydantic", From ca26277e335cf8eb14dcfaa4afae8dca2dcede10 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Oct 2022 23:49:35 +0200 Subject: [PATCH 441/539] Bump joblib from 0.16.0 to 1.2.0 in /examples/research_projects/lxmert (#19268) Bumps [joblib](https://github.com/joblib/joblib) from 0.16.0 to 1.2.0. - [Release notes](https://github.com/joblib/joblib/releases) - [Changelog](https://github.com/joblib/joblib/blob/master/CHANGES.rst) - [Commits](https://github.com/joblib/joblib/compare/0.16.0...1.2.0) --- updated-dependencies: - dependency-name: joblib dependency-type: direct:production ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/research_projects/lxmert/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/research_projects/lxmert/requirements.txt b/examples/research_projects/lxmert/requirements.txt index a4f21439b59b56..8accf0a6015eb4 100644 --- a/examples/research_projects/lxmert/requirements.txt +++ b/examples/research_projects/lxmert/requirements.txt @@ -29,7 +29,7 @@ ipython-genutils==0.2.0 ipywidgets==7.5.1 jedi==0.17.2 Jinja2>=2.11.3 -joblib==0.16.0 +joblib==1.2.0 jsonschema==3.2.0 jupyter==1.0.0 jupyter-client==6.1.7 From c7ec0afce02820dfc1e2bd194e38bcf59bf3805d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Oct 2022 23:57:37 +0200 Subject: [PATCH 442/539] Bump joblib in /examples/research_projects/decision_transformer (#19270) Bumps [joblib](https://github.com/joblib/joblib) from 1.1.0 to 1.2.0. - [Release notes](https://github.com/joblib/joblib/releases) - [Changelog](https://github.com/joblib/joblib/blob/master/CHANGES.rst) - [Commits](https://github.com/joblib/joblib/compare/1.1.0...1.2.0) --- updated-dependencies: - dependency-name: joblib dependency-type: direct:production ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .../research_projects/decision_transformer/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/research_projects/decision_transformer/requirements.txt b/examples/research_projects/decision_transformer/requirements.txt index ba3e6a2d34fe48..6d393d15df2557 100644 --- a/examples/research_projects/decision_transformer/requirements.txt +++ b/examples/research_projects/decision_transformer/requirements.txt @@ -95,7 +95,7 @@ jedi==0.18.1 Jinja2==2.11.3 jinja2-time==0.2.0 jmespath==0.10.0 -joblib==1.1.0 +joblib==1.2.0 jsonschema==4.4.0 keras==2.8.0 Keras-Preprocessing==1.1.2 From 4c962d5e790d06c142af35aad165c74c0bcf861a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Oct 2022 23:57:50 +0200 Subject: [PATCH 443/539] Bump joblib in /examples/research_projects/visual_bert (#19269) Bumps [joblib](https://github.com/joblib/joblib) from 0.16.0 to 1.2.0. - [Release notes](https://github.com/joblib/joblib/releases) - [Changelog](https://github.com/joblib/joblib/blob/master/CHANGES.rst) - [Commits](https://github.com/joblib/joblib/compare/0.16.0...1.2.0) --- updated-dependencies: - dependency-name: joblib dependency-type: direct:production ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- examples/research_projects/visual_bert/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/research_projects/visual_bert/requirements.txt b/examples/research_projects/visual_bert/requirements.txt index a4f21439b59b56..8accf0a6015eb4 100644 --- a/examples/research_projects/visual_bert/requirements.txt +++ b/examples/research_projects/visual_bert/requirements.txt @@ -29,7 +29,7 @@ ipython-genutils==0.2.0 ipywidgets==7.5.1 jedi==0.17.2 Jinja2>=2.11.3 -joblib==0.16.0 +joblib==1.2.0 jsonschema==3.2.0 jupyter==1.0.0 jupyter-client==6.1.7 From 534cd8ff94d783ea7162765f58045b019c71e3c4 Mon Sep 17 00:00:00 2001 From: Shubham S Jagtap <63872951+ShubhamJagtap2000@users.noreply.github.com> Date: Tue, 4 Oct 2022 17:16:50 +0530 Subject: [PATCH 444/539] Update README.md (#19309) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 4eb429652ab013..d711e861cdcd64 100644 --- a/README.md +++ b/README.md @@ -408,7 +408,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h To check if each model has an implementation in Flax, PyTorch or TensorFlow, or has an associated tokenizer backed by the 🤗 Tokenizers library, refer to [this table](https://huggingface.co/docs/transformers/index#supported-frameworks). -These implementations have been tested on several datasets (see the example scripts) and should match the performance of the original implementations. You can find more details on performance in the Examples section of the [documentation](https://huggingface.co/docs/transformers/examples). +These implementations have been tested on several datasets (see the example scripts) and should match the performance of the original implementations. You can find more details on performance in the Examples section of the [documentation](https://github.com/huggingface/transformers/tree/main/examples). ## Learn more From fe10796f4f492fe4e438c266ccac139ced81e8ae Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Tue, 4 Oct 2022 15:00:52 +0200 Subject: [PATCH 445/539] [Docs] Fix link (#19313) --- src/transformers/modeling_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/modeling_utils.py b/src/transformers/modeling_utils.py index ec876af9e55e1f..5f4fccd335ce27 100644 --- a/src/transformers/modeling_utils.py +++ b/src/transformers/modeling_utils.py @@ -1777,7 +1777,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more information about each option see [designing a device - map](https://hf.co/docs/accelerate/main/big_modeling#designing-a-device-map). + map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map). max_memory (`Dict`, *optional*): A dictionary device identifier to maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. From 3a1a56a8fef3bc8586cb150186a10f5776dcb7ef Mon Sep 17 00:00:00 2001 From: Matt Date: Tue, 4 Oct 2022 14:48:27 +0100 Subject: [PATCH 446/539] Fix for sequence regression fit() in TF (#19316) Co-authored-by: Your Name --- src/transformers/modeling_tf_utils.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/transformers/modeling_tf_utils.py b/src/transformers/modeling_tf_utils.py index c0f15592866e71..8bccea12b33f0c 100644 --- a/src/transformers/modeling_tf_utils.py +++ b/src/transformers/modeling_tf_utils.py @@ -274,6 +274,9 @@ class TFSequenceClassificationLoss: def hf_compute_loss(self, labels, logits): if logits.shape.rank == 1 or logits.shape[1] == 1: loss_fn = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE) + if labels.shape.rank == 1: + # MeanSquaredError returns a scalar loss if the labels are 1D, so avoid that + labels = tf.expand_dims(labels, axis=-1) else: loss_fn = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction=tf.keras.losses.Reduction.NONE From ac5ea74ee8a263157604f986aecb1806df82d46e Mon Sep 17 00:00:00 2001 From: IMvision12 <88665786+IMvision12@users.noreply.github.com> Date: Tue, 4 Oct 2022 19:25:15 +0530 Subject: [PATCH 447/539] Added Type hints for LED TF (#19315) * Update modeling_tf_led.py * Update modeling_tf_led.py --- .../models/led/modeling_tf_led.py | 34 ++++++++++--------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/src/transformers/models/led/modeling_tf_led.py b/src/transformers/models/led/modeling_tf_led.py index 0a803212dabf22..bfd1954a86ae6f 100644 --- a/src/transformers/models/led/modeling_tf_led.py +++ b/src/transformers/models/led/modeling_tf_led.py @@ -19,6 +19,7 @@ from dataclasses import dataclass from typing import List, Optional, Tuple, Union +import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation @@ -26,6 +27,7 @@ # Public API from ...modeling_tf_utils import ( + TFModelInputType, TFPreTrainedModel, TFSharedEmbeddings, TFWrappedEmbeddings, @@ -2390,23 +2392,23 @@ def set_output_embeddings(self, value): @replace_return_docstrings(output_type=TFLEDSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def call( self, - input_ids=None, - attention_mask=None, - decoder_input_ids=None, - decoder_attention_mask=None, - head_mask=None, - decoder_head_mask=None, + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + decoder_input_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + decoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + decoder_head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, encoder_outputs: Optional[TFLEDEncoderBaseModelOutput] = None, - global_attention_mask=None, - past_key_values=None, - inputs_embeds=None, - decoder_inputs_embeds=None, - use_cache=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - labels=None, - training=False, + global_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, + inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, + decoder_inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: Optional[tf.Tensor] = None, + training: bool = False, ): """ Returns: From 9b630168a92153347ca1bc2578477fc4b861bdf3 Mon Sep 17 00:00:00 2001 From: Debjit Bhowal <68442560+debjit-bw@users.noreply.github.com> Date: Tue, 4 Oct 2022 19:26:35 +0530 Subject: [PATCH 448/539] Added type hints for TF: rag model (#19284) * Added type hints for TF: rag model * TFModelInputType added in place of TF.Tensor * reformatting by black --- .../models/rag/modeling_tf_rag.py | 130 +++++++++--------- 1 file changed, 68 insertions(+), 62 deletions(-) diff --git a/src/transformers/models/rag/modeling_tf_rag.py b/src/transformers/models/rag/modeling_tf_rag.py index 08a9adf591aae2..cd09a83d9f0437 100644 --- a/src/transformers/models/rag/modeling_tf_rag.py +++ b/src/transformers/models/rag/modeling_tf_rag.py @@ -16,13 +16,19 @@ """TFRAG model implementation.""" from dataclasses import dataclass -from typing import List, Optional, Tuple +from typing import List, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...configuration_utils import PretrainedConfig -from ...modeling_tf_utils import TFCausalLanguageModelingLoss, TFPreTrainedModel, shape_list, unpack_inputs +from ...modeling_tf_utils import ( + TFCausalLanguageModelingLoss, + TFModelInputType, + TFPreTrainedModel, + shape_list, + unpack_inputs, +) from ...utils import ModelOutput, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_rag import RagConfig from .retrieval_rag import RagRetriever @@ -491,7 +497,7 @@ def __init__( config: Optional[PretrainedConfig] = None, question_encoder: Optional[TFPreTrainedModel] = None, generator: Optional[TFPreTrainedModel] = None, - retriever: Optional = None, + retriever: Optional[RagRetriever] = None, load_weight_prefix: Optional[str] = None, **kwargs, ): @@ -538,22 +544,22 @@ def set_retriever(self, retriever: RagRetriever): @replace_return_docstrings(output_type=TFRetrievAugLMOutput, config_class=_CONFIG_FOR_DOC) def call( self, - input_ids=None, - attention_mask=None, - encoder_outputs=None, - decoder_input_ids=None, - decoder_attention_mask=None, - past_key_values=None, - doc_scores=None, - context_input_ids=None, - context_attention_mask=None, - use_cache=None, - output_attentions=None, - output_hidden_states=None, - output_retrieved=None, - n_docs=None, - return_dict=None, - training=False, + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + encoder_outputs: Optional[Union[np.ndarray, tf.Tensor]] = None, + decoder_input_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + decoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, + doc_scores: Optional[Union[np.ndarray, tf.Tensor]] = None, + context_input_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + context_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_retrieved: Optional[bool] = None, + n_docs: Optional[int] = None, + return_dict: Optional[bool] = None, + training: bool = False, **kwargs ): r""" @@ -726,7 +732,7 @@ def __init__( config: Optional[PretrainedConfig] = None, question_encoder: Optional[TFPreTrainedModel] = None, generator: Optional[TFPreTrainedModel] = None, - retriever: Optional = None, + retriever: Optional[RagRetriever] = None, **kwargs, ): assert config is not None or ( @@ -828,25 +834,25 @@ def marginalize(self, seq_logits, doc_scores, n_docs=None): @replace_return_docstrings(output_type=TFRetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC) def call( self, - input_ids=None, - attention_mask=None, - decoder_input_ids=None, - decoder_attention_mask=None, - encoder_outputs=None, - past_key_values=None, - doc_scores=None, - context_input_ids=None, - context_attention_mask=None, - use_cache=None, - output_attentions=None, - output_hidden_states=None, - output_retrieved=None, - n_docs=None, - do_marginalize=None, - labels=None, - reduce_loss=None, - return_dict=None, - training=False, + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + decoder_input_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + decoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + encoder_outputs: Optional[Union[np.ndarray, tf.Tensor]] = None, + past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, + doc_scores: Optional[Union[np.ndarray, tf.Tensor]] = None, + context_input_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + context_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_retrieved: Optional[bool] = None, + n_docs: Optional[int] = None, + do_marginalize: Optional[bool] = None, + labels: Optional[Union[np.ndarray, tf.Tensor]] = None, + reduce_loss: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, **kwargs # needs kwargs for generation ): r""" @@ -980,7 +986,7 @@ def call( def generate( self, - input_ids: Optional[tf.Tensor] = None, + input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[tf.Tensor] = None, context_input_ids=None, context_attention_mask=None, @@ -1381,7 +1387,7 @@ def __init__( config: Optional[PretrainedConfig] = None, question_encoder: Optional[TFPreTrainedModel] = None, generator: Optional[TFPreTrainedModel] = None, - retriever: Optional = None, + retriever: Optional[RagRetriever] = None, **kwargs, ): assert config is not None or ( @@ -1425,27 +1431,27 @@ def question_encoder(self): @replace_return_docstrings(output_type=TFRetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC) def call( self, - input_ids=None, - attention_mask=None, - decoder_input_ids=None, - decoder_attention_mask=None, - encoder_outputs=None, - past_key_values=None, - doc_scores=None, - context_input_ids=None, - context_attention_mask=None, - use_cache=None, - output_attentions=None, - output_hidden_states=None, - output_retrieved=None, - n_docs=None, - exclude_bos_score=None, - labels=None, - reduce_loss=None, - return_dict=None, - training=False, + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + decoder_input_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + decoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + encoder_outputs: Optional[Union[np.ndarray, tf.Tensor]] = None, + past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, + doc_scores: Optional[Union[np.ndarray, tf.Tensor]] = None, + context_input_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + context_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_retrieved: Optional[bool] = None, + n_docs: Optional[int] = None, + exclude_bos_score: Optional[bool] = None, + labels: Optional[Union[np.ndarray, tf.Tensor]] = None, + reduce_loss: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, **kwargs # needs kwargs for generation - ): + ) -> Union[Tuple[tf.Tensor], TFRetrievAugLMMarginOutput]: r""" exclude_bos_score (`bool`, *optional*): Only relevant if `labels` is passed. If `True`, the score of the BOS token is disregarded when computing @@ -1657,7 +1663,7 @@ def gather2d(target, id_tensor): def generate( self, - input_ids: Optional[tf.Tensor] = None, + input_ids: Optional[TFModelInputType] = None, attention_mask: Optional[tf.Tensor] = None, context_input_ids=None, context_attention_mask=None, From cc263e9bb47ae0532dff940e3f2d48c8bab691f6 Mon Sep 17 00:00:00 2001 From: gouqi_nju <52353600+gouqi666@users.noreply.github.com> Date: Tue, 4 Oct 2022 22:00:47 +0800 Subject: [PATCH 449/539] alter retrived to retrieved (#18863) --- src/transformers/models/rag/retrieval_rag.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/transformers/models/rag/retrieval_rag.py b/src/transformers/models/rag/retrieval_rag.py index 797c1a7332acf7..261255b9f62f59 100644 --- a/src/transformers/models/rag/retrieval_rag.py +++ b/src/transformers/models/rag/retrieval_rag.py @@ -612,17 +612,17 @@ def __call__( ) if self.return_tokenized_docs: - retrived_doc_text = [] - retrived_doc_title = [] + retrieved_doc_text = [] + retrieved_doc_title = [] for b_idx in range(len(docs)): for doc_idx in range(n_docs): - retrived_doc_text.append(docs[b_idx]["text"][doc_idx]) - retrived_doc_title.append(docs[b_idx]["title"][doc_idx]) + retrieved_doc_text.append(docs[b_idx]["text"][doc_idx]) + retrieved_doc_title.append(docs[b_idx]["title"][doc_idx]) tokenized_docs = self.ctx_encoder_tokenizer( - retrived_doc_title, - retrived_doc_text, + retrieved_doc_title, + retrieved_doc_text, truncation=True, padding="longest", return_tensors=return_tensors, From ca3ebc44e07299816071143392fa6d66bc7bf586 Mon Sep 17 00:00:00 2001 From: Oscar Dominguez Date: Tue, 4 Oct 2022 16:07:33 +0200 Subject: [PATCH 450/539] ci(stale.yml): upgrade actions/setup-python to v4 (#19281) --- .github/workflows/stale.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 01b19cda84184f..395d6953b91bf8 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -15,7 +15,7 @@ jobs: - uses: actions/checkout@v2 - name: Setup Python - uses: actions/setup-python@v1 + uses: actions/setup-python@v4 with: python-version: 3.7 @@ -24,4 +24,4 @@ jobs: pip install PyGithub - name: Close stale issues run: | - python scripts/stale.py \ No newline at end of file + python scripts/stale.py From cd024da6f8c66bcff4b630ee02acc9366722877f Mon Sep 17 00:00:00 2001 From: Oscar Dominguez Date: Tue, 4 Oct 2022 16:07:53 +0200 Subject: [PATCH 451/539] ci(workflows): update actions/checkout to v3 (#19280) in stale.yml --- .github/workflows/stale.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 395d6953b91bf8..9412442a7d0a78 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -12,7 +12,7 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Setup Python uses: actions/setup-python@v4 From f134d385535d10ee4c0950223e6ddfdc109c99df Mon Sep 17 00:00:00 2001 From: Partho Date: Tue, 4 Oct 2022 19:38:29 +0530 Subject: [PATCH 452/539] wrap forward passes with torch.no_grad() (#19279) --- tests/models/deit/test_modeling_deit.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/models/deit/test_modeling_deit.py b/tests/models/deit/test_modeling_deit.py index 27f92c2d976a61..82b7f286925cfb 100644 --- a/tests/models/deit/test_modeling_deit.py +++ b/tests/models/deit/test_modeling_deit.py @@ -384,7 +384,8 @@ def test_inference_image_classification_head(self): inputs = feature_extractor(images=image, return_tensors="pt").to(torch_device) # forward pass - outputs = model(**inputs) + with torch.no_grad(): + outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) From 2403dbd6073796b644a0610bc6268bf4ed8277cd Mon Sep 17 00:00:00 2001 From: Partho Date: Tue, 4 Oct 2022 19:39:23 +0530 Subject: [PATCH 453/539] wrap forward passes with torch.no_grad() (#19278) --- tests/models/deberta_v2/test_modeling_deberta_v2.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/models/deberta_v2/test_modeling_deberta_v2.py b/tests/models/deberta_v2/test_modeling_deberta_v2.py index 93436b901bb171..8c9bf3bbf7e5c9 100644 --- a/tests/models/deberta_v2/test_modeling_deberta_v2.py +++ b/tests/models/deberta_v2/test_modeling_deberta_v2.py @@ -299,7 +299,8 @@ def test_inference_no_head(self): input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) - output = model(input_ids, attention_mask=attention_mask)[0] + with torch.no_grad(): + output = model(input_ids, attention_mask=attention_mask)[0] # compare the actual values for a slice. expected_slice = torch.tensor( [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] From d6e920449ec26be7e15616ed6835ca96f0a99a56 Mon Sep 17 00:00:00 2001 From: Partho Date: Tue, 4 Oct 2022 19:42:03 +0530 Subject: [PATCH 454/539] wrap forward passes with torch.no_grad() (#19274) --- tests/models/convbert/test_modeling_convbert.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/models/convbert/test_modeling_convbert.py b/tests/models/convbert/test_modeling_convbert.py index d3eb0aec4cfc0d..f2b82aaadf3299 100644 --- a/tests/models/convbert/test_modeling_convbert.py +++ b/tests/models/convbert/test_modeling_convbert.py @@ -444,7 +444,8 @@ class ConvBertModelIntegrationTest(unittest.TestCase): def test_inference_no_head(self): model = ConvBertModel.from_pretrained("YituTech/conv-bert-base") input_ids = torch.tensor([[1, 2, 3, 4, 5, 6]]) - output = model(input_ids)[0] + with torch.no_grad(): + output = model(input_ids)[0] expected_shape = torch.Size((1, 6, 768)) self.assertEqual(output.shape, expected_shape) From a9782881a40ecc905a658b6cd3e561548d78c8ec Mon Sep 17 00:00:00 2001 From: Partho Date: Tue, 4 Oct 2022 19:43:22 +0530 Subject: [PATCH 455/539] wrap forward passes with torch.no_grad() (#19273) --- tests/models/big_bird/test_modeling_big_bird.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/models/big_bird/test_modeling_big_bird.py b/tests/models/big_bird/test_modeling_big_bird.py index ec59f8f93d6ee4..ec8705607d6578 100644 --- a/tests/models/big_bird/test_modeling_big_bird.py +++ b/tests/models/big_bird/test_modeling_big_bird.py @@ -627,7 +627,8 @@ def test_inference_block_sparse_pretraining(self): model.to(torch_device) input_ids = torch.tensor([[20920, 232, 328, 1437] * 1024], dtype=torch.long, device=torch_device) - outputs = model(input_ids) + with torch.no_grad(): + outputs = model(input_ids) prediction_logits = outputs.prediction_logits seq_relationship_logits = outputs.seq_relationship_logits @@ -655,7 +656,8 @@ def test_inference_full_pretraining(self): model.to(torch_device) input_ids = torch.tensor([[20920, 232, 328, 1437] * 512], dtype=torch.long, device=torch_device) - outputs = model(input_ids) + with torch.no_grad(): + outputs = model(input_ids) prediction_logits = outputs.prediction_logits seq_relationship_logits = outputs.seq_relationship_logits @@ -920,7 +922,8 @@ def test_auto_padding(self): model.eval() input_ids = torch.tensor([200 * [10] + 40 * [2] + [1]], device=torch_device, dtype=torch.long) - output = model(input_ids).to_tuple()[0] + with torch.no_grad(): + output = model(input_ids).to_tuple()[0] # fmt: off target = torch.tensor( From 6fd254a37d7cb3d9a024b1b502bcc4b034caa013 Mon Sep 17 00:00:00 2001 From: Arnaud Stiegler Date: Tue, 4 Oct 2022 10:24:07 -0400 Subject: [PATCH 456/539] Removing BertConfig inheritance from LayoutLMConfig (#19307) * removing BertConfig inheritance * fix missing arguments --- .../models/layoutlm/configuration_layoutlm.py | 38 ++++++++++--------- 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/src/transformers/models/layoutlm/configuration_layoutlm.py b/src/transformers/models/layoutlm/configuration_layoutlm.py index 94100791d39ff2..ebf2cb28a5ed8c 100644 --- a/src/transformers/models/layoutlm/configuration_layoutlm.py +++ b/src/transformers/models/layoutlm/configuration_layoutlm.py @@ -21,7 +21,6 @@ from ... import is_torch_available from ...onnx import OnnxConfig, PatchingSpec from ...utils import logging -from ..bert.configuration_bert import BertConfig logger = logging.get_logger(__name__) @@ -36,7 +35,7 @@ } -class LayoutLMConfig(BertConfig): +class LayoutLMConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`LayoutLMModel`]. It is used to instantiate a LayoutLM model according to the specified arguments, defining the model architecture. Instantiating a configuration @@ -110,25 +109,28 @@ def __init__( initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, + position_embedding_type="absolute", + use_cache=True, + classifier_dropout=None, max_2d_position_embeddings=1024, **kwargs ): - super().__init__( - vocab_size=vocab_size, - hidden_size=hidden_size, - num_hidden_layers=num_hidden_layers, - num_attention_heads=num_attention_heads, - intermediate_size=intermediate_size, - hidden_act=hidden_act, - hidden_dropout_prob=hidden_dropout_prob, - attention_probs_dropout_prob=attention_probs_dropout_prob, - max_position_embeddings=max_position_embeddings, - type_vocab_size=type_vocab_size, - initializer_range=initializer_range, - layer_norm_eps=layer_norm_eps, - pad_token_id=pad_token_id, - **kwargs, - ) + super().__init__(pad_token_id=pad_token_id, **kwargs) + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.position_embedding_type = position_embedding_type + self.use_cache = use_cache + self.classifier_dropout = classifier_dropout self.max_2d_position_embeddings = max_2d_position_embeddings From 6dce9e0cdd4df98e975b532532712c147c03d398 Mon Sep 17 00:00:00 2001 From: Sushrut1101 Date: Tue, 4 Oct 2022 19:56:52 +0530 Subject: [PATCH 457/539] docker-build: Update actions/checkout to v3 (#19288) --- .github/workflows/build-docker-images.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-docker-images.yml b/.github/workflows/build-docker-images.yml index 88934cbcbc5bb4..cfb5e59f07e2eb 100644 --- a/.github/workflows/build-docker-images.yml +++ b/.github/workflows/build-docker-images.yml @@ -27,7 +27,7 @@ jobs: uses: docker/setup-buildx-action@v1 - name: Check out code - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: Login to DockerHub uses: docker/login-action@v1 From 587d84b1784cce30c59a12faee2a672bac49bbdd Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Tue, 4 Oct 2022 17:52:13 +0200 Subject: [PATCH 458/539] Add `BloomForQuestionAnswering` (#19310) * add bloom for question answering - attempt to add Bloom for question answering - adapted from `GPTJForQuestionAnswering` - Fixed `num_labels` to `2` for common tests - Added a bit of docstring - All common tests pass * Update src/transformers/models/bloom/modeling_bloom.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * revert changes related to `num_labels` Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> --- docs/source/en/model_doc/bloom.mdx | 5 + src/transformers/__init__.py | 2 + src/transformers/models/auto/modeling_auto.py | 1 + src/transformers/models/bloom/__init__.py | 2 + .../models/bloom/modeling_bloom.py | 93 +++++++++++++++++++ src/transformers/utils/dummy_pt_objects.py | 7 ++ tests/models/bloom/test_modeling_bloom.py | 10 ++ 7 files changed, 120 insertions(+) diff --git a/docs/source/en/model_doc/bloom.mdx b/docs/source/en/model_doc/bloom.mdx index cf415603d0fe8b..3fc48ab9746be0 100644 --- a/docs/source/en/model_doc/bloom.mdx +++ b/docs/source/en/model_doc/bloom.mdx @@ -55,3 +55,8 @@ Several smaller versions of the models have been trained on the same dataset. BL [[autodoc]] BloomForTokenClassification - forward + +## BloomForQuestionAnswering + +[[autodoc]] BloomForQuestionAnswering + - forward diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index e5e6e6c171c0dd..b3c6cca623ef05 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -993,6 +993,7 @@ "BloomPreTrainedModel", "BloomForSequenceClassification", "BloomForTokenClassification", + "BloomForQuestionAnswering", ] ) _import_structure["models.blenderbot"].extend( @@ -3857,6 +3858,7 @@ from .models.bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, + BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index 8821cfb6c93e91..5cac7e7bda6fcd 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -572,6 +572,7 @@ ("bert", "BertForQuestionAnswering"), ("big_bird", "BigBirdForQuestionAnswering"), ("bigbird_pegasus", "BigBirdPegasusForQuestionAnswering"), + ("bloom", "BloomForQuestionAnswering"), ("camembert", "CamembertForQuestionAnswering"), ("canine", "CanineForQuestionAnswering"), ("convbert", "ConvBertForQuestionAnswering"), diff --git a/src/transformers/models/bloom/__init__.py b/src/transformers/models/bloom/__init__.py index 9aea71885883ce..ece85ac301228c 100644 --- a/src/transformers/models/bloom/__init__.py +++ b/src/transformers/models/bloom/__init__.py @@ -45,6 +45,7 @@ "BloomPreTrainedModel", "BloomForSequenceClassification", "BloomForTokenClassification", + "BloomForQuestionAnswering", ] if TYPE_CHECKING: @@ -67,6 +68,7 @@ from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, + BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, diff --git a/src/transformers/models/bloom/modeling_bloom.py b/src/transformers/models/bloom/modeling_bloom.py index 21eaded45b0c76..23404d1215d4d7 100644 --- a/src/transformers/models/bloom/modeling_bloom.py +++ b/src/transformers/models/bloom/modeling_bloom.py @@ -28,6 +28,7 @@ from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, + QuestionAnsweringModelOutput, SequenceClassifierOutputWithPast, TokenClassifierOutput, ) @@ -1167,3 +1168,95 @@ def forward( hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) + + +@add_start_docstrings( + """ + The BLOOM Model transformer with a span classification head on top for extractive question-answering tasks like + SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). + """, + BLOOM_START_DOCSTRING, +) +class BloomForQuestionAnswering(BloomPreTrainedModel): + _keys_to_ignore_on_load_missing = [r"h.*.self_attention.scale_mask_softmax.causal_mask", r"lm_head.weight"] + + def __init__(self, config): + super().__init__(config) + self.transformer = BloomModel(config) + self.qa_outputs = nn.Linear(config.hidden_size, 2) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + start_positions: Optional[torch.LongTensor] = None, + end_positions: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, QuestionAnsweringModelOutput]: + r""" + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.transformer( + input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1).contiguous() + end_logits = end_logits.squeeze(-1).contiguous() + + total_loss = None + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions = start_positions.clamp(0, ignored_index) + end_positions = end_positions.clamp(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if not return_dict: + output = (start_logits, end_logits) + outputs[2:] + return ((total_loss,) + output) if total_loss is not None else output + + return QuestionAnsweringModelOutput( + loss=total_loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index 9f540bd2838634..f3583e1b61efdc 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -1006,6 +1006,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class BloomForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class BloomForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] diff --git a/tests/models/bloom/test_modeling_bloom.py b/tests/models/bloom/test_modeling_bloom.py index e9ae51a9f554d8..06cec20456f533 100644 --- a/tests/models/bloom/test_modeling_bloom.py +++ b/tests/models/bloom/test_modeling_bloom.py @@ -31,6 +31,7 @@ from transformers import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, + BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, @@ -274,6 +275,14 @@ def create_and_check_token_classification_model(self, config, input_ids, input_m result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) + def create_and_check_question_answering_model(self, config, input_ids, input_mask, *args): + model = BloomForQuestionAnswering(config) + model.to(torch_device) + model.eval() + + result = model(input_ids, attention_mask=input_mask) + self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) + def create_and_check_forward_and_backwards( self, config, input_ids, input_mask, *args, gradient_checkpointing=False ): @@ -314,6 +323,7 @@ class BloomModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase) BloomForCausalLM, BloomForSequenceClassification, BloomForTokenClassification, + BloomForQuestionAnswering, ) if is_torch_available() else () From 971da2e6ec256b7b263be7035462fc58dd4d3d35 Mon Sep 17 00:00:00 2001 From: Samuel Arcadinho Date: Tue, 4 Oct 2022 19:28:28 +0100 Subject: [PATCH 459/539] Clamping hidden state values to allow FP16 (#19229) * Clamping hidden state values to allow FP16 * Reformating * Adding missing if condition * Update src/transformers/models/longt5/modeling_longt5.py Co-authored-by: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> * Update src/transformers/models/longt5/modeling_longt5.py Co-authored-by: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> * Update src/transformers/models/longt5/modeling_longt5.py Co-authored-by: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> * Formating file Co-authored-by: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> --- src/transformers/models/longt5/modeling_longt5.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/src/transformers/models/longt5/modeling_longt5.py b/src/transformers/models/longt5/modeling_longt5.py index 114b6564b524af..f592e5bc7e1136 100644 --- a/src/transformers/models/longt5/modeling_longt5.py +++ b/src/transformers/models/longt5/modeling_longt5.py @@ -1199,6 +1199,11 @@ def forward( hidden_states, present_key_value_state = self_attention_outputs[:2] attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights + # clamp inf values to enable fp16 inference - check https://github.com/huggingface/transformers/pull/19229/ + if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): + clamp_value = torch.finfo(hidden_states.dtype).max - 1000 + hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) + do_cross_attention = self.is_decoder and encoder_hidden_states is not None if do_cross_attention: # the actual query length is unknown for cross attention @@ -1221,6 +1226,11 @@ def forward( ) hidden_states = cross_attention_outputs[0] + # clamp inf values to enable fp16 inference - check https://github.com/huggingface/transformers/pull/19229/ + if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): + clamp_value = torch.finfo(hidden_states.dtype).max - 1000 + hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) + # Combine self attn and cross attn key value states if present_key_value_state is not None: present_key_value_state = present_key_value_state + cross_attention_outputs[1] @@ -1231,6 +1241,11 @@ def forward( # Apply Feed Forward layer hidden_states = self.layer[-1](hidden_states) + # clamp inf values to enable fp16 inference - check https://github.com/huggingface/transformers/pull/19229/ + if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any(): + clamp_value = torch.finfo(hidden_states.dtype).max - 1000 + hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) + outputs = (hidden_states,) if use_cache: From bf7eb0c9b3c804f5314397c8a5473b2f8b98ef96 Mon Sep 17 00:00:00 2001 From: Erin Date: Tue, 4 Oct 2022 22:51:55 +0100 Subject: [PATCH 460/539] Remove interdependency from OpenAI tokenizer (#19327) * Remove interdependency from OpenAI tokenizer * Adjust import order for linter --- .../models/openai/tokenization_openai.py | 164 +++++++++++++++++- 1 file changed, 162 insertions(+), 2 deletions(-) diff --git a/src/transformers/models/openai/tokenization_openai.py b/src/transformers/models/openai/tokenization_openai.py index 40bb824cd7186d..96fd492dbb6c46 100644 --- a/src/transformers/models/openai/tokenization_openai.py +++ b/src/transformers/models/openai/tokenization_openai.py @@ -18,11 +18,11 @@ import json import os import re +import unicodedata from typing import Optional, Tuple -from ...tokenization_utils import PreTrainedTokenizer +from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...utils import logging -from ..bert.tokenization_bert import BasicTokenizer logger = logging.get_logger(__name__) @@ -42,6 +42,166 @@ } +# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize +def whitespace_tokenize(text): + """Runs basic whitespace cleaning and splitting on a piece of text.""" + text = text.strip() + if not text: + return [] + tokens = text.split() + return tokens + + +# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer +class BasicTokenizer(object): + """ + Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). + + Args: + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether or not to lowercase the input when tokenizing. + never_split (`Iterable`, *optional*): + Collection of tokens which will never be split during tokenization. Only has an effect when + `do_basic_tokenize=True` + tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): + Whether or not to tokenize Chinese characters. + + This should likely be deactivated for Japanese (see this + [issue](https://github.com/huggingface/transformers/issues/328)). + strip_accents (`bool`, *optional*): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for `lowercase` (as in the original BERT). + """ + + def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None): + if never_split is None: + never_split = [] + self.do_lower_case = do_lower_case + self.never_split = set(never_split) + self.tokenize_chinese_chars = tokenize_chinese_chars + self.strip_accents = strip_accents + + def tokenize(self, text, never_split=None): + """ + Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see + WordPieceTokenizer. + + Args: + never_split (`List[str]`, *optional*) + Kept for backward compatibility purposes. Now implemented directly at the base class level (see + [`PreTrainedTokenizer.tokenize`]) List of token not to split. + """ + # union() returns a new set by concatenating the two sets. + never_split = self.never_split.union(set(never_split)) if never_split else self.never_split + text = self._clean_text(text) + + # This was added on November 1st, 2018 for the multilingual and Chinese + # models. This is also applied to the English models now, but it doesn't + # matter since the English models were not trained on any Chinese data + # and generally don't have any Chinese data in them (there are Chinese + # characters in the vocabulary because Wikipedia does have some Chinese + # words in the English Wikipedia.). + if self.tokenize_chinese_chars: + text = self._tokenize_chinese_chars(text) + orig_tokens = whitespace_tokenize(text) + split_tokens = [] + for token in orig_tokens: + if token not in never_split: + if self.do_lower_case: + token = token.lower() + if self.strip_accents is not False: + token = self._run_strip_accents(token) + elif self.strip_accents: + token = self._run_strip_accents(token) + split_tokens.extend(self._run_split_on_punc(token, never_split)) + + output_tokens = whitespace_tokenize(" ".join(split_tokens)) + return output_tokens + + def _run_strip_accents(self, text): + """Strips accents from a piece of text.""" + text = unicodedata.normalize("NFD", text) + output = [] + for char in text: + cat = unicodedata.category(char) + if cat == "Mn": + continue + output.append(char) + return "".join(output) + + def _run_split_on_punc(self, text, never_split=None): + """Splits punctuation on a piece of text.""" + if never_split is not None and text in never_split: + return [text] + chars = list(text) + i = 0 + start_new_word = True + output = [] + while i < len(chars): + char = chars[i] + if _is_punctuation(char): + output.append([char]) + start_new_word = True + else: + if start_new_word: + output.append([]) + start_new_word = False + output[-1].append(char) + i += 1 + + return ["".join(x) for x in output] + + def _tokenize_chinese_chars(self, text): + """Adds whitespace around any CJK character.""" + output = [] + for char in text: + cp = ord(char) + if self._is_chinese_char(cp): + output.append(" ") + output.append(char) + output.append(" ") + else: + output.append(char) + return "".join(output) + + def _is_chinese_char(self, cp): + """Checks whether CP is the codepoint of a CJK character.""" + # This defines a "chinese character" as anything in the CJK Unicode block: + # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) + # + # Note that the CJK Unicode block is NOT all Japanese and Korean characters, + # despite its name. The modern Korean Hangul alphabet is a different block, + # as is Japanese Hiragana and Katakana. Those alphabets are used to write + # space-separated words, so they are not treated specially and handled + # like the all of the other languages. + if ( + (cp >= 0x4E00 and cp <= 0x9FFF) + or (cp >= 0x3400 and cp <= 0x4DBF) # + or (cp >= 0x20000 and cp <= 0x2A6DF) # + or (cp >= 0x2A700 and cp <= 0x2B73F) # + or (cp >= 0x2B740 and cp <= 0x2B81F) # + or (cp >= 0x2B820 and cp <= 0x2CEAF) # + or (cp >= 0xF900 and cp <= 0xFAFF) + or (cp >= 0x2F800 and cp <= 0x2FA1F) # + ): # + return True + + return False + + def _clean_text(self, text): + """Performs invalid character removal and whitespace cleanup on text.""" + output = [] + for char in text: + cp = ord(char) + if cp == 0 or cp == 0xFFFD or _is_control(char): + continue + if _is_whitespace(char): + output.append(" ") + else: + output.append(char) + return "".join(output) + + def get_pairs(word): """ Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length From 6268694e27f1fc0192ba24e4bec181061b4a9bf8 Mon Sep 17 00:00:00 2001 From: Druhin Abrol Date: Wed, 5 Oct 2022 05:09:47 +0530 Subject: [PATCH 461/539] removing XLMConfig inheritance from FlaubertConfig (#19326) * removing XLMConfig inheritance from FlaubertConfig * removing XLMConfig inheritance from FlaubertConfig * Fixed styling issue * Update configuration_flaubert.py Co-authored-by: Druhin Abrol --- .../models/flaubert/configuration_flaubert.py | 92 +++++++++++++++++-- 1 file changed, 85 insertions(+), 7 deletions(-) diff --git a/src/transformers/models/flaubert/configuration_flaubert.py b/src/transformers/models/flaubert/configuration_flaubert.py index eedf3d3f855ac8..7d9c60338516e3 100644 --- a/src/transformers/models/flaubert/configuration_flaubert.py +++ b/src/transformers/models/flaubert/configuration_flaubert.py @@ -12,14 +12,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -""" Flaubert configuration, based on XLM.""" - +""" Flaubert configuration""" from collections import OrderedDict from typing import Mapping +from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging -from ..xlm.configuration_xlm import XLMConfig logger = logging.get_logger(__name__) @@ -32,7 +31,7 @@ } -class FlaubertConfig(XLMConfig): +class FlaubertConfig(PretrainedConfig): """ This is the configuration class to store the configuration of a [`FlaubertModel`] or a [`TFFlaubertModel`]. It is used to instantiate a FlauBERT model according to the specified arguments, defining the model architecture. @@ -137,11 +136,90 @@ class FlaubertConfig(XLMConfig): """ model_type = "flaubert" - - def __init__(self, layerdrop=0.0, pre_norm=False, pad_token_id=2, bos_token_id=0, **kwargs): + attribute_map = { + "hidden_size": "emb_dim", + "num_attention_heads": "n_heads", + "num_hidden_layers": "n_layers", + "n_words": "vocab_size", # For backward compatibility + } + + def __init__( + self, + pre_norm=False, + layerdrop=0.0, + vocab_size=30145, + emb_dim=2048, + n_layers=12, + n_heads=16, + dropout=0.1, + attention_dropout=0.1, + gelu_activation=True, + sinusoidal_embeddings=False, + causal=False, + asm=False, + n_langs=1, + use_lang_emb=True, + max_position_embeddings=512, + embed_init_std=2048**-0.5, + layer_norm_eps=1e-12, + init_std=0.02, + bos_index=0, + eos_index=1, + pad_index=2, + unk_index=3, + mask_index=5, + is_encoder=True, + summary_type="first", + summary_use_proj=True, + summary_activation=None, + summary_proj_to_labels=True, + summary_first_dropout=0.1, + start_n_top=5, + end_n_top=5, + mask_token_id=0, + lang_id=0, + pad_token_id=2, + bos_token_id=0, + **kwargs + ): """Constructs FlaubertConfig.""" - self.layerdrop = layerdrop self.pre_norm = pre_norm + self.layerdrop = layerdrop + self.vocab_size = vocab_size + self.emb_dim = emb_dim + self.n_layers = n_layers + self.n_heads = n_heads + self.dropout = dropout + self.attention_dropout = attention_dropout + self.gelu_activation = gelu_activation + self.sinusoidal_embeddings = sinusoidal_embeddings + self.causal = causal + self.asm = asm + self.n_langs = n_langs + self.use_lang_emb = use_lang_emb + self.layer_norm_eps = layer_norm_eps + self.bos_index = bos_index + self.eos_index = eos_index + self.pad_index = pad_index + self.unk_index = unk_index + self.mask_index = mask_index + self.is_encoder = is_encoder + self.max_position_embeddings = max_position_embeddings + self.embed_init_std = embed_init_std + self.init_std = init_std + self.summary_type = summary_type + self.summary_use_proj = summary_use_proj + self.summary_activation = summary_activation + self.summary_proj_to_labels = summary_proj_to_labels + self.summary_first_dropout = summary_first_dropout + self.start_n_top = start_n_top + self.end_n_top = end_n_top + self.mask_token_id = mask_token_id + self.lang_id = lang_id + + if "n_words" in kwargs: + self.n_words = kwargs["n_words"] + super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs) From 07e94bf1593424824cc9c6bf4a5e045d58f10707 Mon Sep 17 00:00:00 2001 From: Alara Dirik <8944735+alaradirik@users.noreply.github.com> Date: Wed, 5 Oct 2022 15:27:15 +0300 Subject: [PATCH 462/539] Maskformer post-processing fixes and improvements (#19172) - Improves MaskFormer docs, corrects minor typos - Restructures MaskFormerFeatureExtractor.post_process_panoptic_segmentation for better readability, adds target_sizes argument for optional resizing - Adds post_process_semantic_segmentation and post_process_instance_segmentation methods. - Adds a deprecation warning to post_process_segmentation method in favour of post_process_instance_segmentation --- docs/source/en/model_doc/maskformer.mdx | 1 + .../feature_extraction_maskformer.py | 423 +++++++++++++----- .../models/maskformer/modeling_maskformer.py | 11 +- .../test_feature_extraction_maskformer.py | 29 +- 4 files changed, 345 insertions(+), 119 deletions(-) diff --git a/docs/source/en/model_doc/maskformer.mdx b/docs/source/en/model_doc/maskformer.mdx index b06ed2cd56c6c5..34414dbd8f2cc0 100644 --- a/docs/source/en/model_doc/maskformer.mdx +++ b/docs/source/en/model_doc/maskformer.mdx @@ -58,6 +58,7 @@ This model was contributed by [francesco](https://huggingface.co/francesco). The - encode_inputs - post_process_segmentation - post_process_semantic_segmentation + - post_process_instance_segmentation - post_process_panoptic_segmentation ## MaskFormerModel diff --git a/src/transformers/models/maskformer/feature_extraction_maskformer.py b/src/transformers/models/maskformer/feature_extraction_maskformer.py index 8514bb26da2ac2..c86fce646b8ef8 100644 --- a/src/transformers/models/maskformer/feature_extraction_maskformer.py +++ b/src/transformers/models/maskformer/feature_extraction_maskformer.py @@ -35,6 +35,153 @@ logger = logging.get_logger(__name__) +def binary_mask_to_rle(mask): + """ + Converts given binary mask of shape (height, width) to the run-length encoding (RLE) format. + + Args: + mask (`torch.Tensor` or `numpy.array`): + A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target + segment_id or class_id. + + Returns: + `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE + format. + """ + if is_torch_tensor(mask): + mask = mask.numpy() + + pixels = mask.flatten() + pixels = np.concatenate([[0], pixels, [0]]) + runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 + runs[1::2] -= runs[::2] + return [x for x in runs] + + +def convert_segmentation_to_rle(segmentation): + """ + Converts given segmentation map of shape (height, width) to the run-length encoding (RLE) format. + + Args: + segmentation (`torch.Tensor` or `numpy.array`): + A segmentation map of shape `(height, width)` where each value denotes a segment or class id. + + Returns: + `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id. + """ + segment_ids = torch.unique(segmentation) + + run_length_encodings = [] + for idx in segment_ids: + mask = torch.where(segmentation == idx, 1, 0) + rle = binary_mask_to_rle(mask) + run_length_encodings.append(rle) + + return run_length_encodings + + +def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): + """ + Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and + `labels`. + + Args: + masks (`torch.Tensor`): + A tensor of shape `(num_queries, height, width)`. + scores (`torch.Tensor`): + A tensor of shape `(num_queries)`. + labels (`torch.Tensor`): + A tensor of shape `(num_queries)`. + object_mask_threshold (`float`): + A number between 0 and 1 used to binarize the masks. + + Raises: + `ValueError`: Raised when the first dimension doesn't match in all input tensors. + + Returns: + `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region + < `object_mask_threshold`. + """ + if not (masks.shape[0] == scores.shape[0] == labels.shape[0]): + raise ValueError("mask, scores and labels must have the same shape!") + + to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) + return masks[to_keep], scores[to_keep], labels[to_keep] + + +def check_segment_validity(mask_labels, mask_probs, k, overlap_mask_area_threshold=0.8): + # Get the mask associated with the k class + mask_k = mask_labels == k + mask_k_area = mask_k.sum() + + # Compute the area of all the stuff in query k + original_area = (mask_probs[k] >= 0.5).sum() + mask_exists = mask_k_area > 0 and original_area > 0 + + # Eliminate disconnected tiny segments + if mask_exists: + area_ratio = mask_k_area / original_area + if not area_ratio.item() > overlap_mask_area_threshold: + mask_exists = False + + return mask_exists, mask_k + + +def compute_segments( + mask_probs, + pred_scores, + pred_labels, + overlap_mask_area_threshold: float = 0.8, + label_ids_to_fuse: Optional[Set[int]] = None, + target_size: Tuple[int, int] = None, +): + height = mask_probs.shape[1] if target_size is None else target_size[0] + width = mask_probs.shape[2] if target_size is None else target_size[1] + + segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) + segments: List[Dict] = [] + + if target_size is not None: + mask_probs = interpolate(mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False)[0] + + current_segment_id = 0 + + # Weigh each mask by its prediction score + mask_probs *= pred_scores.view(-1, 1, 1) + mask_labels = mask_probs.argmax(0) # [height, width] + + # Keep track of instances of each class + stuff_memory_list: Dict[str, int] = {} + for k in range(pred_labels.shape[0]): + pred_class = pred_labels[k].item() + should_fuse = pred_class in label_ids_to_fuse + + # Check if mask exists and large enough to be a segment + mask_exists, mask_k = check_segment_validity(mask_labels, mask_probs, k, overlap_mask_area_threshold) + + if mask_exists: + if pred_class in stuff_memory_list: + current_segment_id = stuff_memory_list[pred_class] + else: + current_segment_id += 1 + + # Add current object segment to final segmentation map + segmentation[mask_k] = current_segment_id + segment_score = round(pred_scores[k].item(), 6) + segments.append( + { + "id": current_segment_id, + "label_id": pred_class, + "was_fused": should_fuse, + "score": segment_score, + } + ) + if should_fuse: + stuff_memory_list[pred_class] = current_segment_id + + return segmentation, segments + + class MaskFormerFeatureExtractor(FeatureExtractionMixin, ImageFeatureExtractionMixin): r""" Constructs a MaskFormer feature extractor. The feature extractor can be used to prepare image(s) and optional @@ -488,6 +635,12 @@ def post_process_segmentation( `torch.Tensor`: A tensor of shape (`batch_size, num_class_labels, height, width`). """ + logger.warning( + "`post_process_segmentation` is deprecated and will be removed in v5 of Transformers, please use" + " `post_process_instance_segmentation`", + FutureWarning, + ) + # class_queries_logits has shape [BATCH, QUERIES, CLASSES + 1] class_queries_logits = outputs.class_queries_logits # masks_queries_logits has shape [BATCH, QUERIES, HEIGHT, WIDTH] @@ -512,59 +665,141 @@ def post_process_segmentation( return segmentation - def remove_low_and_no_objects(self, masks, scores, labels, object_mask_threshold, num_labels): + def post_process_semantic_segmentation( + self, outputs, target_sizes: Optional[List[Tuple[int, int]]] = None + ) -> "torch.Tensor": """ - Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` - and `labels`. + Converts the output of [`MaskFormerForInstanceSegmentation`] into semantic segmentation maps. Only supports + PyTorch. Args: - masks (`torch.Tensor`): - A tensor of shape `(num_queries, height, width)`. - scores (`torch.Tensor`): - A tensor of shape `(num_queries)`. - labels (`torch.Tensor`): - A tensor of shape `(num_queries)`. - object_mask_threshold (`float`): - A number between 0 and 1 used to binarize the masks. - - Raises: - `ValueError`: Raised when the first dimension doesn't match in all input tensors. - + outputs ([`MaskFormerForInstanceSegmentation`]): + Raw outputs of the model. + target_sizes (`List[Tuple[int, int]]`, *optional*, defaults to `None`): + List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested + final size (height, width) of each prediction. If left to None, predictions will not be resized. Returns: - `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the - region < `object_mask_threshold`. + `List[torch.Tensor]`: + A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) + corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each + `torch.Tensor` correspond to a semantic class id. """ - if not (masks.shape[0] == scores.shape[0] == labels.shape[0]): - raise ValueError("mask, scores and labels must have the same shape!") + class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] + masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] + + # Remove the null class `[..., :-1]` + masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] + masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] - to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) + # Semantic segmentation logits of shape (batch_size, num_classes, height, width) + segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) + batch_size = class_queries_logits.shape[0] - return masks[to_keep], scores[to_keep], labels[to_keep] + # Resize logits and compute semantic segmentation maps + if target_sizes is not None: + if batch_size != len(target_sizes): + raise ValueError( + "Make sure that you pass in as many target sizes as the batch dimension of the logits" + ) - def post_process_semantic_segmentation( - self, outputs: "MaskFormerForInstanceSegmentationOutput", target_size: Tuple[int, int] = None - ) -> "torch.Tensor": + semantic_segmentation = [] + for idx in range(batch_size): + resized_logits = torch.nn.functional.interpolate( + segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False + ) + semantic_map = resized_logits[0].argmax(dim=0) + semantic_segmentation.append(semantic_map) + else: + semantic_segmentation = segmentation.argmax(dim=1) + semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] + + return semantic_segmentation + + def post_process_instance_segmentation( + self, + outputs, + threshold: float = 0.5, + overlap_mask_area_threshold: float = 0.8, + target_sizes: Optional[List[Tuple[int, int]]] = None, + return_coco_annotation: Optional[bool] = False, + ) -> List[Dict]: """ - Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into semantic segmentation predictions. Only + Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into instance segmentation predictions. Only supports PyTorch. Args: - outputs ([`MaskFormerForInstanceSegmentationOutput`]): - The outputs from [`MaskFormerForInstanceSegmentation`]. - + outputs ([`MaskFormerForInstanceSegmentation`]): + Raw outputs of the model. + threshold (`float`, *optional*, defaults to 0.5): + The probability score threshold to keep predicted instance masks. + overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): + The overlap mask area threshold to merge or discard small disconnected parts within each binary + instance mask. + target_sizes (`List[Tuple]`, *optional*): + List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested + final size (height, width) of each prediction. If left to None, predictions will not be resized. + return_coco_annotation (`bool`, *optional*): + Defaults to `False`. If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) + format. Returns: - `torch.Tensor`: A tensor of shape `batch_size, height, width`. + `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: + - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id` or + `List[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to + `True`. Set to `None` if no mask if found above `threshold`. + - **segments_info** -- A dictionary that contains additional information on each segment. + - **id** -- An integer representing the `segment_id`. + - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. + - **score** -- Prediction score of segment with `segment_id`. """ - segmentation = self.post_process_segmentation(outputs, target_size) - semantic_segmentation = segmentation.argmax(dim=1) - return semantic_segmentation + class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] + masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] + + batch_size = class_queries_logits.shape[0] + num_labels = class_queries_logits.shape[-1] - 1 + + mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] + + # Predicted label and score of each query (batch_size, num_queries) + pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) + + # Loop over items in batch size + results: List[Dict[str, Tensor]] = [] + + for i in range(batch_size): + mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( + mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels + ) + + # No mask found + if mask_probs_item.shape[0] <= 0: + segmentation = None + segments: List[Dict] = [] + continue + + # Get segmentation map and segment information of batch item + target_size = target_sizes[i] if target_sizes is not None else None + segmentation, segments = compute_segments( + mask_probs_item, + pred_scores_item, + pred_labels_item, + overlap_mask_area_threshold, + target_size, + ) + + # Return segmentation map in run-length encoding (RLE) format + if return_coco_annotation: + segmentation = convert_segmentation_to_rle(segmentation) + + results.append({"segmentation": segmentation, "segments_info": segments}) + return results def post_process_panoptic_segmentation( self, - outputs: "MaskFormerForInstanceSegmentationOutput", - object_mask_threshold: float = 0.8, + outputs, + threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, + target_sizes: Optional[List[Tuple[int, int]]] = None, ) -> List[Dict]: """ Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into image panoptic segmentation @@ -573,94 +808,72 @@ def post_process_panoptic_segmentation( Args: outputs ([`MaskFormerForInstanceSegmentationOutput`]): The outputs from [`MaskFormerForInstanceSegmentation`]. - object_mask_threshold (`float`, *optional*, defaults to 0.8): - The object mask threshold. + threshold (`float`, *optional*, defaults to 0.5): + The probability score threshold to keep predicted instance masks. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): - The overlap mask area threshold to use. + The overlap mask area threshold to merge or discard small disconnected parts within each binary + instance mask. label_ids_to_fuse (`Set[int]`, *optional*): The labels in this state will have all their instances be fused together. For instance we could say there can only be one sky in an image, but several persons, so the label ID for sky would be in that set, but not the one for person. + target_sizes (`List[Tuple]`, *optional*): + List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested + final size (height, width) of each prediction in batch. If left to None, predictions will not be + resized. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`. - - **segments** -- a dictionary with the following keys + - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set + to `None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized + to the corresponding `target_sizes` entry. + - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- an integer representing the `segment_id`. - - **label_id** -- an integer representing the segment's label. + - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. + Multiple instances of the same class / label were fused and assigned a single `segment_id`. + - **score** -- Prediction score of segment with `segment_id`. """ if label_ids_to_fuse is None: logger.warning("`label_ids_to_fuse` unset. No instance will be fused.") label_ids_to_fuse = set() - # class_queries_logits has shape [BATCH, QUERIES, CLASSES + 1] - class_queries_logits = outputs.class_queries_logits - # keep track of the number of labels, subtract -1 for null class + + class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] + masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] + + batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 - # masks_queries_logits has shape [BATCH, QUERIES, HEIGHT, WIDTH] - masks_queries_logits = outputs.masks_queries_logits - # since all images are padded, they all have the same spatial dimensions - _, _, height, width = masks_queries_logits.shape - # for each query, the best scores and their indeces + + mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] + + # Predicted label and score of each query (batch_size, num_queries) pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) - # pred_scores and pred_labels shape = [BATH,NUM_QUERIES] - mask_probs = masks_queries_logits.sigmoid() - # mask probs has shape [BATCH, QUERIES, HEIGHT, WIDTH] - # now, we need to iterate over the batch size to correctly process the segmentation we got from the queries using our thresholds. Even if the original predicted masks have the same shape across the batch, they won't after thresholding so batch-wise operations are impossible + + # Loop over items in batch size results: List[Dict[str, Tensor]] = [] - for mask_probs, pred_scores, pred_labels in zip(mask_probs, pred_scores, pred_labels): - mask_probs, pred_scores, pred_labels = self.remove_low_and_no_objects( - mask_probs, pred_scores, pred_labels, object_mask_threshold, num_labels + + for i in range(batch_size): + mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( + mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) - we_detect_something = mask_probs.shape[0] > 0 - - segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) - segments: List[Dict] = [] - - if we_detect_something: - current_segment_id = 0 - # weight each mask by its score - mask_probs *= pred_scores.view(-1, 1, 1) - # find out for each pixel what is the most likely class to be there - mask_labels = mask_probs.argmax(0) - # mask_labels shape = [H,W] where each pixel has a class label - stuff_memory_list: Dict[str, int] = {} - # this is a map between stuff and segments id, the used it to keep track of the instances of one class - for k in range(pred_labels.shape[0]): - pred_class = pred_labels[k].item() - # check if pred_class should be fused. For example, class "sky" cannot have more then one instance - should_fuse = pred_class in label_ids_to_fuse - # get the mask associated with the k class - mask_k = mask_labels == k - # create the area, since bool we just need to sum :) - mask_k_area = mask_k.sum() - # this is the area of all the stuff in query k - original_area = (mask_probs[k] >= 0.5).sum() - - mask_exists = mask_k_area > 0 and original_area > 0 - - if mask_exists: - # find out how much of the all area mask_k is using - area_ratio = mask_k_area / original_area - mask_k_is_overlapping_enough = area_ratio.item() > overlap_mask_area_threshold - - if mask_k_is_overlapping_enough: - # merge stuff regions - if pred_class in stuff_memory_list: - current_segment_id = stuff_memory_list[pred_class] - else: - current_segment_id += 1 - # then we update out mask with the current segment - segmentation[mask_k] = current_segment_id - segments.append( - { - "id": current_segment_id, - "label_id": pred_class, - "was_fused": should_fuse, - } - ) - if should_fuse: - stuff_memory_list[pred_class] = current_segment_id - results.append({"segmentation": segmentation, "segments": segments}) + + # No mask found + if mask_probs_item.shape[0] <= 0: + segmentation = None + segments: List[Dict] = [] + continue + + # Get segmentation map and segment information of batch item + target_size = target_sizes[i] if target_sizes is not None else None + segmentation, segments = compute_segments( + mask_probs_item, + pred_scores_item, + pred_labels_item, + overlap_mask_area_threshold, + label_ids_to_fuse, + target_size, + ) + + results.append({"segmentation": segmentation, "segments_info": segments}) return results diff --git a/src/transformers/models/maskformer/modeling_maskformer.py b/src/transformers/models/maskformer/modeling_maskformer.py index 1b37fdd27c568b..882c8bb025d290 100644 --- a/src/transformers/models/maskformer/modeling_maskformer.py +++ b/src/transformers/models/maskformer/modeling_maskformer.py @@ -259,7 +259,8 @@ class MaskFormerForInstanceSegmentationOutput(ModelOutput): """ Class for outputs of [`MaskFormerForInstanceSegmentation`]. - This output can be directly passed to [`~MaskFormerFeatureExtractor.post_process_segmentation`] or + This output can be directly passed to [`~MaskFormerFeatureExtractor.post_process_semantic_segmentation`] or or + [`~MaskFormerFeatureExtractor.post_process_instance_segmentation`] or [`~MaskFormerFeatureExtractor.post_process_panoptic_segmentation`] depending on the task. Please, see [`~MaskFormerFeatureExtractor] for details regarding usage. @@ -267,11 +268,11 @@ class MaskFormerForInstanceSegmentationOutput(ModelOutput): loss (`torch.Tensor`, *optional*): The computed loss, returned when labels are present. class_queries_logits (`torch.FloatTensor`): - A tensor of shape `(batch_size, num_queries, height, width)` representing the proposed masks for each - query. - masks_queries_logits (`torch.FloatTensor`): A tensor of shape `(batch_size, num_queries, num_labels + 1)` representing the proposed classes for each query. Note the `+ 1` is needed because we incorporate the null class. + masks_queries_logits (`torch.FloatTensor`): + A tensor of shape `(batch_size, num_queries, height, width)` representing the proposed masks for each + query. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Last hidden states (final feature map) of the last stage of the encoder model (backbone). pixel_decoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): @@ -2547,8 +2548,8 @@ def forward( >>> masks_queries_logits = outputs.masks_queries_logits >>> # you can pass them to feature_extractor for postprocessing - >>> output = feature_extractor.post_process_segmentation(outputs) >>> output = feature_extractor.post_process_semantic_segmentation(outputs) + >>> output = feature_extractor.post_process_instance_segmentation(outputs) >>> output = feature_extractor.post_process_panoptic_segmentation(outputs) ``` """ diff --git a/tests/models/maskformer/test_feature_extraction_maskformer.py b/tests/models/maskformer/test_feature_extraction_maskformer.py index 461add8c035565..41ff2d10aa524a 100644 --- a/tests/models/maskformer/test_feature_extraction_maskformer.py +++ b/tests/models/maskformer/test_feature_extraction_maskformer.py @@ -29,6 +29,7 @@ if is_vision_available(): from transformers import MaskFormerFeatureExtractor + from transformers.models.maskformer.feature_extraction_maskformer import binary_mask_to_rle from transformers.models.maskformer.modeling_maskformer import MaskFormerForInstanceSegmentationOutput if is_vision_available(): @@ -344,6 +345,17 @@ def common(is_instance_map=False, segmentation_type=None): common(is_instance_map=False, segmentation_type="pil") common(is_instance_map=True, segmentation_type="pil") + def test_binary_mask_to_rle(self): + fake_binary_mask = np.zeros((20, 50)) + fake_binary_mask[0, 20:] = 1 + fake_binary_mask[1, :15] = 1 + fake_binary_mask[5, :10] = 1 + + rle = binary_mask_to_rle(fake_binary_mask) + self.assertEqual(len(rle), 4) + self.assertEqual(rle[0], 21) + self.assertEqual(rle[1], 45) + def test_post_process_segmentation(self): fature_extractor = self.feature_extraction_class(num_labels=self.feature_extract_tester.num_classes) outputs = self.feature_extract_tester.get_fake_maskformer_outputs() @@ -373,31 +385,30 @@ def test_post_process_semantic_segmentation(self): segmentation = fature_extractor.post_process_semantic_segmentation(outputs) + self.assertEqual(len(segmentation), self.feature_extract_tester.batch_size) self.assertEqual( - segmentation.shape, + segmentation[0].shape, ( - self.feature_extract_tester.batch_size, self.feature_extract_tester.height, self.feature_extract_tester.width, ), ) - target_size = (1, 4) - - segmentation = fature_extractor.post_process_semantic_segmentation(outputs, target_size=target_size) + target_sizes = [(1, 4) for i in range(self.feature_extract_tester.batch_size)] + segmentation = fature_extractor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes) - self.assertEqual(segmentation.shape, (self.feature_extract_tester.batch_size, *target_size)) + self.assertEqual(segmentation[0].shape, target_sizes[0]) def test_post_process_panoptic_segmentation(self): fature_extractor = self.feature_extraction_class(num_labels=self.feature_extract_tester.num_classes) outputs = self.feature_extract_tester.get_fake_maskformer_outputs() - segmentation = fature_extractor.post_process_panoptic_segmentation(outputs, object_mask_threshold=0) + segmentation = fature_extractor.post_process_panoptic_segmentation(outputs, threshold=0) self.assertTrue(len(segmentation) == self.feature_extract_tester.batch_size) for el in segmentation: self.assertTrue("segmentation" in el) - self.assertTrue("segments" in el) - self.assertEqual(type(el["segments"]), list) + self.assertTrue("segments_info" in el) + self.assertEqual(type(el["segments_info"]), list) self.assertEqual( el["segmentation"].shape, (self.feature_extract_tester.height, self.feature_extract_tester.width) ) From 512fa41c53cd1cc0dad612119dc96365e9833d4f Mon Sep 17 00:00:00 2001 From: Divyanshu Kumar <53843818+divyanshugit@users.noreply.github.com> Date: Wed, 5 Oct 2022 18:42:47 +0530 Subject: [PATCH 463/539] Removed interdependency of BERT's Tokenizer in tokenization of prophetnet (#19331) * removed interdependency of BERTTokenizer in tokenization of prophetnet * fix: style --- .../prophetnet/tokenization_prophetnet.py | 222 +++++++++++++++++- 1 file changed, 220 insertions(+), 2 deletions(-) diff --git a/src/transformers/models/prophetnet/tokenization_prophetnet.py b/src/transformers/models/prophetnet/tokenization_prophetnet.py index f18bf9d3360386..05e03ad4881a0a 100644 --- a/src/transformers/models/prophetnet/tokenization_prophetnet.py +++ b/src/transformers/models/prophetnet/tokenization_prophetnet.py @@ -15,11 +15,11 @@ import collections import os +import unicodedata from typing import Iterable, List, Optional, Tuple -from ...tokenization_utils import PreTrainedTokenizer +from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...utils import logging -from ..bert.tokenization_bert import BasicTokenizer, WordpieceTokenizer logger = logging.get_logger(__name__) @@ -43,6 +43,224 @@ } +# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize +def whitespace_tokenize(text): + """Runs basic whitespace cleaning and splitting on a piece of text.""" + text = text.strip() + if not text: + return [] + tokens = text.split() + return tokens + + +# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer +class BasicTokenizer(object): + """ + Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). + + Args: + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether or not to lowercase the input when tokenizing. + never_split (`Iterable`, *optional*): + Collection of tokens which will never be split during tokenization. Only has an effect when + `do_basic_tokenize=True` + tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): + Whether or not to tokenize Chinese characters. + + This should likely be deactivated for Japanese (see this + [issue](https://github.com/huggingface/transformers/issues/328)). + strip_accents (`bool`, *optional*): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for `lowercase` (as in the original BERT). + """ + + def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None): + if never_split is None: + never_split = [] + self.do_lower_case = do_lower_case + self.never_split = set(never_split) + self.tokenize_chinese_chars = tokenize_chinese_chars + self.strip_accents = strip_accents + + def tokenize(self, text, never_split=None): + """ + Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see + WordPieceTokenizer. + + Args: + never_split (`List[str]`, *optional*) + Kept for backward compatibility purposes. Now implemented directly at the base class level (see + [`PreTrainedTokenizer.tokenize`]) List of token not to split. + """ + # union() returns a new set by concatenating the two sets. + never_split = self.never_split.union(set(never_split)) if never_split else self.never_split + text = self._clean_text(text) + + # This was added on November 1st, 2018 for the multilingual and Chinese + # models. This is also applied to the English models now, but it doesn't + # matter since the English models were not trained on any Chinese data + # and generally don't have any Chinese data in them (there are Chinese + # characters in the vocabulary because Wikipedia does have some Chinese + # words in the English Wikipedia.). + if self.tokenize_chinese_chars: + text = self._tokenize_chinese_chars(text) + orig_tokens = whitespace_tokenize(text) + split_tokens = [] + for token in orig_tokens: + if token not in never_split: + if self.do_lower_case: + token = token.lower() + if self.strip_accents is not False: + token = self._run_strip_accents(token) + elif self.strip_accents: + token = self._run_strip_accents(token) + split_tokens.extend(self._run_split_on_punc(token, never_split)) + + output_tokens = whitespace_tokenize(" ".join(split_tokens)) + return output_tokens + + def _run_strip_accents(self, text): + """Strips accents from a piece of text.""" + text = unicodedata.normalize("NFD", text) + output = [] + for char in text: + cat = unicodedata.category(char) + if cat == "Mn": + continue + output.append(char) + return "".join(output) + + def _run_split_on_punc(self, text, never_split=None): + """Splits punctuation on a piece of text.""" + if never_split is not None and text in never_split: + return [text] + chars = list(text) + i = 0 + start_new_word = True + output = [] + while i < len(chars): + char = chars[i] + if _is_punctuation(char): + output.append([char]) + start_new_word = True + else: + if start_new_word: + output.append([]) + start_new_word = False + output[-1].append(char) + i += 1 + + return ["".join(x) for x in output] + + def _tokenize_chinese_chars(self, text): + """Adds whitespace around any CJK character.""" + output = [] + for char in text: + cp = ord(char) + if self._is_chinese_char(cp): + output.append(" ") + output.append(char) + output.append(" ") + else: + output.append(char) + return "".join(output) + + def _is_chinese_char(self, cp): + """Checks whether CP is the codepoint of a CJK character.""" + # This defines a "chinese character" as anything in the CJK Unicode block: + # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) + # + # Note that the CJK Unicode block is NOT all Japanese and Korean characters, + # despite its name. The modern Korean Hangul alphabet is a different block, + # as is Japanese Hiragana and Katakana. Those alphabets are used to write + # space-separated words, so they are not treated specially and handled + # like the all of the other languages. + if ( + (cp >= 0x4E00 and cp <= 0x9FFF) + or (cp >= 0x3400 and cp <= 0x4DBF) # + or (cp >= 0x20000 and cp <= 0x2A6DF) # + or (cp >= 0x2A700 and cp <= 0x2B73F) # + or (cp >= 0x2B740 and cp <= 0x2B81F) # + or (cp >= 0x2B820 and cp <= 0x2CEAF) # + or (cp >= 0xF900 and cp <= 0xFAFF) + or (cp >= 0x2F800 and cp <= 0x2FA1F) # + ): # + return True + + return False + + def _clean_text(self, text): + """Performs invalid character removal and whitespace cleanup on text.""" + output = [] + for char in text: + cp = ord(char) + if cp == 0 or cp == 0xFFFD or _is_control(char): + continue + if _is_whitespace(char): + output.append(" ") + else: + output.append(char) + return "".join(output) + + +# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer +class WordpieceTokenizer(object): + """Runs WordPiece tokenization.""" + + def __init__(self, vocab, unk_token, max_input_chars_per_word=100): + self.vocab = vocab + self.unk_token = unk_token + self.max_input_chars_per_word = max_input_chars_per_word + + def tokenize(self, text): + """ + Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform + tokenization using the given vocabulary. + + For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`. + + Args: + text: A single token or whitespace separated tokens. This should have + already been passed through *BasicTokenizer*. + + Returns: + A list of wordpiece tokens. + """ + + output_tokens = [] + for token in whitespace_tokenize(text): + chars = list(token) + if len(chars) > self.max_input_chars_per_word: + output_tokens.append(self.unk_token) + continue + + is_bad = False + start = 0 + sub_tokens = [] + while start < len(chars): + end = len(chars) + cur_substr = None + while start < end: + substr = "".join(chars[start:end]) + if start > 0: + substr = "##" + substr + if substr in self.vocab: + cur_substr = substr + break + end -= 1 + if cur_substr is None: + is_bad = True + break + sub_tokens.append(cur_substr) + start = end + + if is_bad: + output_tokens.append(self.unk_token) + else: + output_tokens.extend(sub_tokens) + return output_tokens + + def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() From e12bbe3b4d4acab06a88b8e05fa6aecfe565b33a Mon Sep 17 00:00:00 2001 From: Shyam Sudhakaran Date: Wed, 5 Oct 2022 06:15:14 -0700 Subject: [PATCH 464/539] Remove bert interdependency from clip tokenizer (#19332) --- .../models/clip/tokenization_clip.py | 164 +++++++++++++++++- 1 file changed, 162 insertions(+), 2 deletions(-) diff --git a/src/transformers/models/clip/tokenization_clip.py b/src/transformers/models/clip/tokenization_clip.py index c6870cc69f5526..29707ee58177d8 100644 --- a/src/transformers/models/clip/tokenization_clip.py +++ b/src/transformers/models/clip/tokenization_clip.py @@ -16,13 +16,13 @@ import json import os +import unicodedata from functools import lru_cache from typing import List, Optional, Tuple import regex as re -from transformers.models.bert.tokenization_bert import BasicTokenizer -from ...tokenization_utils import AddedToken, PreTrainedTokenizer +from ...tokenization_utils import AddedToken, PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...utils import logging @@ -97,6 +97,166 @@ def whitespace_clean(text): return text +# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize +def whitespace_tokenize(text): + """Runs basic whitespace cleaning and splitting on a piece of text.""" + text = text.strip() + if not text: + return [] + tokens = text.split() + return tokens + + +# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer +class BasicTokenizer(object): + """ + Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). + + Args: + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether or not to lowercase the input when tokenizing. + never_split (`Iterable`, *optional*): + Collection of tokens which will never be split during tokenization. Only has an effect when + `do_basic_tokenize=True` + tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): + Whether or not to tokenize Chinese characters. + + This should likely be deactivated for Japanese (see this + [issue](https://github.com/huggingface/transformers/issues/328)). + strip_accents (`bool`, *optional*): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for `lowercase` (as in the original BERT). + """ + + def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None): + if never_split is None: + never_split = [] + self.do_lower_case = do_lower_case + self.never_split = set(never_split) + self.tokenize_chinese_chars = tokenize_chinese_chars + self.strip_accents = strip_accents + + def tokenize(self, text, never_split=None): + """ + Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see + WordPieceTokenizer. + + Args: + never_split (`List[str]`, *optional*) + Kept for backward compatibility purposes. Now implemented directly at the base class level (see + [`PreTrainedTokenizer.tokenize`]) List of token not to split. + """ + # union() returns a new set by concatenating the two sets. + never_split = self.never_split.union(set(never_split)) if never_split else self.never_split + text = self._clean_text(text) + + # This was added on November 1st, 2018 for the multilingual and Chinese + # models. This is also applied to the English models now, but it doesn't + # matter since the English models were not trained on any Chinese data + # and generally don't have any Chinese data in them (there are Chinese + # characters in the vocabulary because Wikipedia does have some Chinese + # words in the English Wikipedia.). + if self.tokenize_chinese_chars: + text = self._tokenize_chinese_chars(text) + orig_tokens = whitespace_tokenize(text) + split_tokens = [] + for token in orig_tokens: + if token not in never_split: + if self.do_lower_case: + token = token.lower() + if self.strip_accents is not False: + token = self._run_strip_accents(token) + elif self.strip_accents: + token = self._run_strip_accents(token) + split_tokens.extend(self._run_split_on_punc(token, never_split)) + + output_tokens = whitespace_tokenize(" ".join(split_tokens)) + return output_tokens + + def _run_strip_accents(self, text): + """Strips accents from a piece of text.""" + text = unicodedata.normalize("NFD", text) + output = [] + for char in text: + cat = unicodedata.category(char) + if cat == "Mn": + continue + output.append(char) + return "".join(output) + + def _run_split_on_punc(self, text, never_split=None): + """Splits punctuation on a piece of text.""" + if never_split is not None and text in never_split: + return [text] + chars = list(text) + i = 0 + start_new_word = True + output = [] + while i < len(chars): + char = chars[i] + if _is_punctuation(char): + output.append([char]) + start_new_word = True + else: + if start_new_word: + output.append([]) + start_new_word = False + output[-1].append(char) + i += 1 + + return ["".join(x) for x in output] + + def _tokenize_chinese_chars(self, text): + """Adds whitespace around any CJK character.""" + output = [] + for char in text: + cp = ord(char) + if self._is_chinese_char(cp): + output.append(" ") + output.append(char) + output.append(" ") + else: + output.append(char) + return "".join(output) + + def _is_chinese_char(self, cp): + """Checks whether CP is the codepoint of a CJK character.""" + # This defines a "chinese character" as anything in the CJK Unicode block: + # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) + # + # Note that the CJK Unicode block is NOT all Japanese and Korean characters, + # despite its name. The modern Korean Hangul alphabet is a different block, + # as is Japanese Hiragana and Katakana. Those alphabets are used to write + # space-separated words, so they are not treated specially and handled + # like the all of the other languages. + if ( + (cp >= 0x4E00 and cp <= 0x9FFF) + or (cp >= 0x3400 and cp <= 0x4DBF) # + or (cp >= 0x20000 and cp <= 0x2A6DF) # + or (cp >= 0x2A700 and cp <= 0x2B73F) # + or (cp >= 0x2B740 and cp <= 0x2B81F) # + or (cp >= 0x2B820 and cp <= 0x2CEAF) # + or (cp >= 0xF900 and cp <= 0xFAFF) + or (cp >= 0x2F800 and cp <= 0x2FA1F) # + ): # + return True + + return False + + def _clean_text(self, text): + """Performs invalid character removal and whitespace cleanup on text.""" + output = [] + for char in text: + cp = ord(char) + if cp == 0 or cp == 0xFFFD or _is_control(char): + continue + if _is_whitespace(char): + output.append(" ") + else: + output.append(char) + return "".join(output) + + class CLIPTokenizer(PreTrainedTokenizer): """ Construct a CLIP tokenizer. Based on byte-level Byte-Pair-Encoding. From c54bb1ad79b596d19799b6160171cf631f8e1d73 Mon Sep 17 00:00:00 2001 From: Druhin Abrol Date: Wed, 5 Oct 2022 18:49:04 +0530 Subject: [PATCH 465/539] [WIP]remove XLMTokenizer inheritance from FlaubertTokenizer (#19330) * remove XLMTokenizer inheritance from FlaubertTokenizer * remove XLMTokenizer inheritance from FlaubertTokenizer * remove XLMTokenizer inheritance from FlaubertTokenizer * remove XLMTokenizer inheritance from FlaubertTokenizer: fixed styling * removed repo-consistensy issue --- .../models/flaubert/tokenization_flaubert.py | 462 +++++++++++++++++- 1 file changed, 454 insertions(+), 8 deletions(-) diff --git a/src/transformers/models/flaubert/tokenization_flaubert.py b/src/transformers/models/flaubert/tokenization_flaubert.py index 911ef37dac5046..5bc2c59342772a 100644 --- a/src/transformers/models/flaubert/tokenization_flaubert.py +++ b/src/transformers/models/flaubert/tokenization_flaubert.py @@ -12,13 +12,17 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -"""Tokenization classes for Flaubert, based on XLM.""" +"""Tokenization classes for Flaubert.""" +import json +import os +import re import unicodedata +from typing import List, Optional, Tuple +from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging -from ..xlm.tokenization_xlm import XLMTokenizer logger = logging.get_logger(__name__) @@ -86,7 +90,79 @@ def ensure_text(s, encoding="utf-8", errors="strict"): return ensure_text(text, encoding="utf-8", errors="ignore") -class FlaubertTokenizer(XLMTokenizer): +# Copied from transformers.models.xlm.tokenization_xlm.get_pairs +def get_pairs(word): + """ + Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length + strings) + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +# Copied from transformers.models.xlm.tokenization_xlm.replace_unicode_punct +def replace_unicode_punct(text): + """ + Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl + """ + text = text.replace(",", ",") + text = re.sub(r"。\s*", ". ", text) + text = text.replace("、", ",") + text = text.replace("”", '"') + text = text.replace("“", '"') + text = text.replace("∶", ":") + text = text.replace(":", ":") + text = text.replace("?", "?") + text = text.replace("《", '"') + text = text.replace("》", '"') + text = text.replace(")", ")") + text = text.replace("!", "!") + text = text.replace("(", "(") + text = text.replace(";", ";") + text = text.replace("1", "1") + text = text.replace("」", '"') + text = text.replace("「", '"') + text = text.replace("0", "0") + text = text.replace("3", "3") + text = text.replace("2", "2") + text = text.replace("5", "5") + text = text.replace("6", "6") + text = text.replace("9", "9") + text = text.replace("7", "7") + text = text.replace("8", "8") + text = text.replace("4", "4") + text = re.sub(r".\s*", ". ", text) + text = text.replace("~", "~") + text = text.replace("’", "'") + text = text.replace("…", "...") + text = text.replace("━", "-") + text = text.replace("〈", "<") + text = text.replace("〉", ">") + text = text.replace("【", "[") + text = text.replace("】", "]") + text = text.replace("%", "%") + return text + + +# Copied from transformers.models.xlm.tokenization_xlm.remove_non_printing_char +def remove_non_printing_char(text): + """ + Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl + """ + output = [] + for char in text: + cat = unicodedata.category(char) + if cat.startswith("C"): + continue + output.append(char) + return "".join(output) + + +class FlaubertTokenizer(PreTrainedTokenizer): """ Construct a Flaubert tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following: @@ -96,8 +172,49 @@ class FlaubertTokenizer(XLMTokenizer): "__classify__") to a vocabulary. - The argument `do_lowercase` controls lower casing (automatically set for pretrained vocabularies). - This tokenizer inherits from [`XLMTokenizer`]. Please check the superclass for usage examples and documentation - regarding arguments. + This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to + this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + Vocabulary file. + merges_file (`str`): + Merges file. + do_lowercase (`bool`, *optional*, defaults to `False`): + Controls lower casing. + unk_token (`str`, *optional*, defaults to `""`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + bos_token (`str`, *optional*, defaults to `""`): + The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. + + + + When building a sequence using special tokens, this is not the token that is used for the beginning of + sequence. The token used is the `cls_token`. + + + + sep_token (`str`, *optional*, defaults to `""`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + pad_token (`str`, *optional*, defaults to `""`): + The token used for padding, for example when batching sequences of different lengths. + cls_token (`str`, *optional*, defaults to `""`): + The classifier token which is used when doing sequence classification (classification of the whole sequence + instead of per-token classification). It is the first token of the sequence when built with special tokens. + mask_token (`str`, *optional*, defaults to `""`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. + additional_special_tokens (`List[str]`, *optional*, defaults to `["","","","","","","","","",""]`): + List of additional special tokens. + lang2id (`Dict[str, int]`, *optional*): + Dictionary mapping languages string identifiers to their IDs. + id2lang (`Dict[int, str]`, *optional*): + Dictionary mapping language IDs to their string identifiers. + do_lowercase_and_remove_accent (`bool`, *optional*, defaults to `True`): + Whether to lowercase and remove accents when tokenizing. """ vocab_files_names = VOCAB_FILES_NAMES @@ -105,10 +222,187 @@ class FlaubertTokenizer(XLMTokenizer): pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES - def __init__(self, do_lowercase=False, **kwargs): - super().__init__(do_lowercase=do_lowercase, **kwargs) - self.do_lowercase = do_lowercase + def __init__( + self, + vocab_file, + merges_file, + do_lowercase=False, + unk_token="", + bos_token="", + sep_token="", + pad_token="", + cls_token="", + mask_token="", + additional_special_tokens=[ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + ], + lang2id=None, + id2lang=None, + do_lowercase_and_remove_accent=True, + **kwargs + ): + super().__init__( + unk_token=unk_token, + bos_token=bos_token, + sep_token=sep_token, + pad_token=pad_token, + cls_token=cls_token, + mask_token=mask_token, + additional_special_tokens=additional_special_tokens, + lang2id=lang2id, + id2lang=id2lang, + do_lowercase_and_remove_accent=do_lowercase_and_remove_accent, + do_lowercase=do_lowercase**kwargs, + ) + + try: + import sacremoses + except ImportError: + raise ImportError( + "You need to install sacremoses to use FlaubertTokenizer. " + "See https://pypi.org/project/sacremoses/ for installation." + ) + + self.sm = sacremoses + + # cache of sm.MosesPunctNormalizer instance + self.cache_moses_punct_normalizer = dict() + # cache of sm.MosesTokenizer instance + self.cache_moses_tokenizer = dict() + self.lang_with_custom_tokenizer = set(["zh", "th", "ja"]) + # True for current supported model (v1.2.0), False for XLM-17 & 100 self.do_lowercase_and_remove_accent = False + self.lang2id = lang2id + self.id2lang = id2lang + if lang2id is not None and id2lang is not None: + assert len(lang2id) == len(id2lang) + + self.ja_word_tokenizer = None + self.zh_word_tokenizer = None + + with open(vocab_file, encoding="utf-8") as vocab_handle: + self.encoder = json.load(vocab_handle) + self.decoder = {v: k for k, v in self.encoder.items()} + with open(merges_file, encoding="utf-8") as merges_handle: + merges = merges_handle.read().split("\n")[:-1] + merges = [tuple(merge.split()[:2]) for merge in merges] + self.bpe_ranks = dict(zip(merges, range(len(merges)))) + self.cache = {} + + @property + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.do_lower_case + def do_lower_case(self): + return self.do_lowercase_and_remove_accent + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_punct_norm + def moses_punct_norm(self, text, lang): + if lang not in self.cache_moses_punct_normalizer: + punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang) + self.cache_moses_punct_normalizer[lang] = punct_normalizer + else: + punct_normalizer = self.cache_moses_punct_normalizer[lang] + return punct_normalizer.normalize(text) + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_tokenize + def moses_tokenize(self, text, lang): + if lang not in self.cache_moses_tokenizer: + moses_tokenizer = self.sm.MosesTokenizer(lang=lang) + self.cache_moses_tokenizer[lang] = moses_tokenizer + else: + moses_tokenizer = self.cache_moses_tokenizer[lang] + return moses_tokenizer.tokenize(text, return_str=False, escape=False) + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_pipeline + def moses_pipeline(self, text, lang): + text = replace_unicode_punct(text) + text = self.moses_punct_norm(text, lang) + text = remove_non_printing_char(text) + return text + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.ja_tokenize + def ja_tokenize(self, text): + if self.ja_word_tokenizer is None: + try: + import Mykytea + + self.ja_word_tokenizer = Mykytea.Mykytea( + f"-model {os.path.expanduser('~')}/local/share/kytea/model.bin" + ) + except (AttributeError, ImportError): + logger.error( + "Make sure you install KyTea (https://github.com/neubig/kytea) and it's python wrapper" + " (https://github.com/chezou/Mykytea-python) with the following steps" + ) + logger.error("1. git clone git@github.com:neubig/kytea.git && cd kytea") + logger.error("2. autoreconf -i") + logger.error("3. ./configure --prefix=$HOME/local") + logger.error("4. make && make install") + logger.error("5. pip install kytea") + raise + return list(self.ja_word_tokenizer.getWS(text)) + + @property + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.vocab_size + def vocab_size(self): + return len(self.encoder) + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.get_vocab + def get_vocab(self): + return dict(self.encoder, **self.added_tokens_encoder) + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.bpe + def bpe(self, token): + word = tuple(token[:-1]) + (token[-1] + "",) + if token in self.cache: + return self.cache[token] + pairs = get_pairs(word) + + if not pairs: + return token + "" + + while True: + bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + except ValueError: + new_word.extend(word[i:]) + break + else: + new_word.extend(word[i:j]) + i = j + + if word[i] == first and i < len(word) - 1 and word[i + 1] == second: + new_word.append(first + second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = " ".join(word) + if word == "\n ": + word = "\n" + self.cache[token] = word + return word def preprocess_text(self, text): text = text.replace("``", '"').replace("''", '"') @@ -156,3 +450,155 @@ def _tokenize(self, text, bypass_tokenizer=False): split_tokens.extend([t for t in self.bpe(token).split(" ")]) return split_tokens + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer._convert_token_to_id + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.encoder.get(token, self.encoder.get(self.unk_token)) + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer._convert_id_to_token + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.decoder.get(index, self.unk_token) + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.convert_tokens_to_string + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + out_string = "".join(tokens).replace("", " ").strip() + return out_string + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.build_inputs_with_special_tokens + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. An XLM sequence has the following format: + + - single sequence: ` X ` + - pair of sequences: ` A B ` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. + + """ + bos = [self.bos_token_id] + sep = [self.sep_token_id] + + if token_ids_1 is None: + return bos + token_ids_0 + sep + return bos + token_ids_0 + sep + token_ids_1 + sep + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.get_special_tokens_mask + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + if token_ids_1 is not None: + return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] + return [1] + ([0] * len(token_ids_0)) + [1] + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.create_token_type_ids_from_sequences + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLM sequence + pair mask has the following format: + + ``` + 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence | + ``` + + If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.save_vocabulary + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + if not os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + merge_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] + ) + + with open(vocab_file, "w", encoding="utf-8") as f: + f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") + + index = 0 + with open(merge_file, "w", encoding="utf-8") as writer: + for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): + if index != token_index: + logger.warning( + f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." + " Please check that the tokenizer is not corrupted!" + ) + index = token_index + writer.write(" ".join(bpe_tokens) + "\n") + index += 1 + + return vocab_file, merge_file + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.__getstate__ + def __getstate__(self): + state = self.__dict__.copy() + state["sm"] = None + return state + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.__setstate__ + def __setstate__(self, d): + self.__dict__ = d + + try: + import sacremoses + except ImportError: + raise ImportError( + "You need to install sacremoses to use XLMTokenizer. " + "See https://pypi.org/project/sacremoses/ for installation." + ) + + self.sm = sacremoses From 60db81ff60c4b06271bebd1f331d21d66b3fefad Mon Sep 17 00:00:00 2001 From: mustapha ajeghrir <66799406+Mustapha-AJEGHRIR@users.noreply.github.com> Date: Wed, 5 Oct 2022 15:31:33 +0200 Subject: [PATCH 466/539] Making camembert independent from roberta, clean (#19337) Co-authored-by: Mustapha AJEGHRIR --- src/transformers/__init__.py | 2 + src/transformers/models/camembert/__init__.py | 2 + .../models/camembert/modeling_camembert.py | 1521 ++++++++++++++++- src/transformers/utils/dummy_pt_objects.py | 7 + 4 files changed, 1484 insertions(+), 48 deletions(-) diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index b3c6cca623ef05..448300774b8261 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -1024,6 +1024,7 @@ "CamembertForSequenceClassification", "CamembertForTokenClassification", "CamembertModel", + "CamembertPreTrainedModel", ] ) _import_structure["models.canine"].extend( @@ -3873,6 +3874,7 @@ CamembertForSequenceClassification, CamembertForTokenClassification, CamembertModel, + CamembertPreTrainedModel, ) from .models.canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, diff --git a/src/transformers/models/camembert/__init__.py b/src/transformers/models/camembert/__init__.py index c91683d1cde4d6..40aa94e2e45a9b 100644 --- a/src/transformers/models/camembert/__init__.py +++ b/src/transformers/models/camembert/__init__.py @@ -63,6 +63,7 @@ "CamembertForSequenceClassification", "CamembertForTokenClassification", "CamembertModel", + "CamembertPreTrainedModel", ] try: @@ -117,6 +118,7 @@ CamembertForSequenceClassification, CamembertForTokenClassification, CamembertModel, + CamembertPreTrainedModel, ) try: diff --git a/src/transformers/models/camembert/modeling_camembert.py b/src/transformers/models/camembert/modeling_camembert.py index 8d38e7c6620144..58ea0549857fed 100644 --- a/src/transformers/models/camembert/modeling_camembert.py +++ b/src/transformers/models/camembert/modeling_camembert.py @@ -15,21 +15,41 @@ # limitations under the License. """PyTorch CamemBERT model.""" -from ...utils import add_start_docstrings, logging -from ..roberta.modeling_roberta import ( - RobertaForCausalLM, - RobertaForMaskedLM, - RobertaForMultipleChoice, - RobertaForQuestionAnswering, - RobertaForSequenceClassification, - RobertaForTokenClassification, - RobertaModel, +import math +from typing import List, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN, gelu +from ...modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + CausalLMOutputWithCrossAttentions, + MaskedLMOutput, + MultipleChoiceModelOutput, + QuestionAnsweringModelOutput, + SequenceClassifierOutput, + TokenClassifierOutput, +) +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from ...utils import ( + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, ) from .configuration_camembert import CamembertConfig logger = logging.get_logger(__name__) +_CHECKPOINT_FOR_DOC = "camembert-base" +_CONFIG_FOR_DOC = "CamembertConfig" _TOKENIZER_FOR_DOC = "CamembertTokenizer" CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ @@ -56,30 +76,948 @@ """ +# Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings with Roberta->Camembert +class CamembertEmbeddings(nn.Module): + """ + Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. + """ + + # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__ + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) + self.register_buffer( + "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False + ) + + # End copy + self.padding_idx = config.pad_token_id + self.position_embeddings = nn.Embedding( + config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx + ) + + def forward( + self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 + ): + if position_ids is None: + if input_ids is not None: + # Create the position ids from the input token ids. Any padded tokens remain padded. + position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length) + else: + position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) + + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs + # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves + # issue #5664 + if token_type_ids is None: + if hasattr(self, "token_type_ids"): + buffered_token_type_ids = self.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = inputs_embeds + token_type_embeddings + if self.position_embedding_type == "absolute": + position_embeddings = self.position_embeddings(position_ids) + embeddings += position_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + def create_position_ids_from_inputs_embeds(self, inputs_embeds): + """ + We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. + + Args: + inputs_embeds: torch.Tensor + + Returns: torch.Tensor + """ + input_shape = inputs_embeds.size()[:-1] + sequence_length = input_shape[1] + + position_ids = torch.arange( + self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device + ) + return position_ids.unsqueeze(0).expand(input_shape) + + +# Copied from transformers.models.roberta.modeling_roberta.RobertaSelfAttention with Roberta->Camembert +class CamembertSelfAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads})" + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = position_embedding_type or getattr( + config, "position_embedding_type", "absolute" + ) + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) + + self.is_decoder = config.is_decoder + + def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. + is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_layer = past_key_value[0] + value_layer = past_key_value[1] + attention_mask = encoder_attention_mask + elif is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + seq_length = hidden_states.size()[1] + position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in CamembertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + if self.is_decoder: + outputs = outputs + (past_key_value,) + return outputs + + +# Copied from transformers.models.roberta.modeling_roberta.RobertaSelfOutput with Roberta->Camembert +class CamembertSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +# Copied from transformers.models.roberta.modeling_roberta.RobertaAttention with Roberta->Camembert +class CamembertAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + self.self = CamembertSelfAttention(config, position_embedding_type=position_embedding_type) + self.output = CamembertSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Roberta->Camembert +class CamembertIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Roberta->Camembert +class CamembertOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +# Copied from transformers.models.roberta.modeling_roberta.RobertaLayer with Roberta->Camembert +class CamembertLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = CamembertAttention(config) + self.is_decoder = config.is_decoder + self.add_cross_attention = config.add_cross_attention + if self.add_cross_attention: + if not self.is_decoder: + raise ValueError(f"{self} should be used as a decoder model if cross attention is added") + self.crossattention = CamembertAttention(config, position_embedding_type="absolute") + self.intermediate = CamembertIntermediate(config) + self.output = CamembertOutput(config) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + + # if decoder, the last output is tuple of self-attn cache + if self.is_decoder: + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + else: + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + cross_attn_present_key_value = None + if self.is_decoder and encoder_hidden_states is not None: + if not hasattr(self, "crossattention"): + raise ValueError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" + " by setting `config.add_cross_attention=True`" + ) + + # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + cross_attn_past_key_value, + output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights + + # add cross-attn cache to positions 3,4 of present_key_value tuple + cross_attn_present_key_value = cross_attention_outputs[-1] + present_key_value = present_key_value + cross_attn_present_key_value + + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output + ) + outputs = (layer_output,) + outputs + + # if decoder, return the attn key/values as the last output + if self.is_decoder: + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +# Copied from transformers.models.roberta.modeling_roberta.RobertaEncoder with Roberta->Camembert +class CamembertEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList([CamembertLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = False, + output_hidden_states: Optional[bool] = False, + return_dict: Optional[bool] = True, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + + next_decoder_cache = () if use_cache else None + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + if use_cache: + logger.warning( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, past_key_value, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + if self.config.add_cross_attention: + all_cross_attentions = all_cross_attentions + (layer_outputs[2],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +# Copied from transformers.models.bert.modeling_bert.BertPooler +class CamembertPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class CamembertPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = CamembertConfig + base_model_prefix = "roberta" + supports_gradient_checkpointing = True + + # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, nn.Linear): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, CamembertEncoder): + module.gradient_checkpointing = value + + def update_keys_to_ignore(self, config, del_keys_to_ignore): + """Remove some keys from ignore list""" + if not config.tie_word_embeddings: + # must make a new list, or the class variable gets modified! + self._keys_to_ignore_on_save = [k for k in self._keys_to_ignore_on_save if k not in del_keys_to_ignore] + self._keys_to_ignore_on_load_missing = [ + k for k in self._keys_to_ignore_on_load_missing if k not in del_keys_to_ignore + ] + + +CAMEMBERT_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`CamembertTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +# Copied from transformers.models.roberta.modeling_roberta.RobertaClassificationHead with Roberta->Camembert +class CamembertClassificationHead(nn.Module): + """Head for sentence-level classification tasks.""" + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + classifier_dropout = ( + config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob + ) + self.dropout = nn.Dropout(classifier_dropout) + self.out_proj = nn.Linear(config.hidden_size, config.num_labels) + + def forward(self, features, **kwargs): + x = features[:, 0, :] # take token (equiv. to [CLS]) + x = self.dropout(x) + x = self.dense(x) + x = torch.tanh(x) + x = self.dropout(x) + x = self.out_proj(x) + return x + + +# Copied from transformers.models.roberta.modeling_roberta.RobertaLMHead with Roberta->Camembert +class CamembertLMHead(nn.Module): + """Camembert Head for masked language modeling.""" + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + self.decoder = nn.Linear(config.hidden_size, config.vocab_size) + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + self.decoder.bias = self.bias + + def forward(self, features, **kwargs): + x = self.dense(features) + x = gelu(x) + x = self.layer_norm(x) + + # project back to size of vocabulary with bias + x = self.decoder(x) + + return x + + def _tie_weights(self): + # To tie those two weights if they get disconnected (on TPU or when the bias is resized) + self.bias = self.decoder.bias + + @add_start_docstrings( "The bare CamemBERT Model transformer outputting raw hidden-states without any specific head on top.", CAMEMBERT_START_DOCSTRING, ) -class CamembertModel(RobertaModel): +class CamembertModel(CamembertPreTrainedModel): """ - This class overrides [`RobertaModel`]. Please check the superclass for the appropriate documentation alongside - usage examples. + + The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of + cross-attention is added between the self-attention layers, following the architecture described in *Attention is + all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz + Kaiser and Illia Polosukhin. + + To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to + `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and + `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. + + .. _*Attention is all you need*: https://arxiv.org/abs/1706.03762 + """ - config_class = CamembertConfig + _keys_to_ignore_on_load_missing = [r"position_ids"] + + # Copied from transformers.models.bert.modeling_bert.BertModel.__init__ with Bert->Camembert + def __init__(self, config, add_pooling_layer=True): + super().__init__(config) + self.config = config + + self.embeddings = CamembertEmbeddings(config) + self.encoder = CamembertEncoder(config) + + self.pooler = CamembertPooler(config) if add_pooling_layer else None + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithPoolingAndCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + # Copied from transformers.models.bert.modeling_bert.BertModel.forward + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: + r""" + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if self.config.is_decoder: + use_cache = use_cache if use_cache is not None else self.config.use_cache + else: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + batch_size, seq_length = input_shape + device = input_ids.device if input_ids is not None else inputs_embeds.device + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if attention_mask is None: + attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) + + if token_type_ids is None: + if hasattr(self.embeddings, "token_type_ids"): + buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. + extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if self.config.is_decoder and encoder_hidden_states is not None: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() + encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) + if encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) + encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + ) + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = encoder_outputs[0] + pooled_output = self.pooler(sequence_output) if self.pooler is not None else None + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) @add_start_docstrings( """CamemBERT Model with a `language modeling` head on top.""", CAMEMBERT_START_DOCSTRING, ) -class CamembertForMaskedLM(RobertaForMaskedLM): - """ - This class overrides [`RobertaForMaskedLM`]. Please check the superclass for the appropriate documentation - alongside usage examples. - """ +# Copied from transformers.models.roberta.modeling_roberta.RobertaForMaskedLM with Roberta->Camembert, ROBERTA->CAMEMBERT +class CamembertForMaskedLM(CamembertPreTrainedModel): + _keys_to_ignore_on_save = [r"lm_head.decoder.weight", r"lm_head.decoder.bias"] + _keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.weight", r"lm_head.decoder.bias"] + _keys_to_ignore_on_load_unexpected = [r"pooler"] - config_class = CamembertConfig + def __init__(self, config): + super().__init__(config) + + if config.is_decoder: + logger.warning( + "If you want to use `CamembertForMaskedLM` make sure `config.is_decoder=False` for " + "bi-directional self-attention." + ) + + self.roberta = CamembertModel(config, add_pooling_layer=False) + self.lm_head = CamembertLMHead(config) + + # The LM head weights require special treatment only when they are tied with the word embeddings + self.update_keys_to_ignore(config, ["lm_head.decoder.weight"]) + + # Initialize weights and apply final processing + self.post_init() + + def get_output_embeddings(self): + return self.lm_head.decoder + + def set_output_embeddings(self, new_embeddings): + self.lm_head.decoder = new_embeddings + + @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=MaskedLMOutput, + config_class=_CONFIG_FOR_DOC, + mask="", + expected_output="' Paris'", + expected_loss=0.1, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., + config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the + loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + kwargs (`Dict[str, any]`, optional, defaults to *{}*): + Used to hide legacy arguments that have been deprecated. + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.roberta( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = outputs[0] + prediction_scores = self.lm_head(sequence_output) + + masked_lm_loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output + + return MaskedLMOutput( + loss=masked_lm_loss, + logits=prediction_scores, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) @add_start_docstrings( @@ -89,13 +1027,98 @@ class CamembertForMaskedLM(RobertaForMaskedLM): """, CAMEMBERT_START_DOCSTRING, ) -class CamembertForSequenceClassification(RobertaForSequenceClassification): - """ - This class overrides [`RobertaForSequenceClassification`]. Please check the superclass for the appropriate - documentation alongside usage examples. - """ +# Copied from transformers.models.roberta.modeling_roberta.RobertaForSequenceClassification with Roberta->Camembert, ROBERTA->CAMEMBERT +class CamembertForSequenceClassification(CamembertPreTrainedModel): + _keys_to_ignore_on_load_missing = [r"position_ids"] - config_class = CamembertConfig + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.config = config + + self.roberta = CamembertModel(config, add_pooling_layer=False) + self.classifier = CamembertClassificationHead(config) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint="cardiffnlp/twitter-roberta-base-emotion", + output_type=SequenceClassifierOutput, + config_class=_CONFIG_FOR_DOC, + expected_output="'optimism'", + expected_loss=0.08, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.roberta( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = outputs[0] + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) @add_start_docstrings( @@ -105,13 +1128,93 @@ class CamembertForSequenceClassification(RobertaForSequenceClassification): """, CAMEMBERT_START_DOCSTRING, ) -class CamembertForMultipleChoice(RobertaForMultipleChoice): - """ - This class overrides [`RobertaForMultipleChoice`]. Please check the superclass for the appropriate documentation - alongside usage examples. - """ +# Copied from transformers.models.roberta.modeling_roberta.RobertaForMultipleChoice with Roberta->Camembert, ROBERTA->CAMEMBERT +class CamembertForMultipleChoice(CamembertPreTrainedModel): + _keys_to_ignore_on_load_missing = [r"position_ids"] - config_class = CamembertConfig + def __init__(self, config): + super().__init__(config) + + self.roberta = CamembertModel(config) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + self.classifier = nn.Linear(config.hidden_size, 1) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward( + CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") + ) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=MultipleChoiceModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., + num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See + `input_ids` above) + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] + + flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None + flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None + flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None + flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None + flat_inputs_embeds = ( + inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) + if inputs_embeds is not None + else None + ) + + outputs = self.roberta( + flat_input_ids, + position_ids=flat_position_ids, + token_type_ids=flat_token_type_ids, + attention_mask=flat_attention_mask, + head_mask=head_mask, + inputs_embeds=flat_inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + pooled_output = outputs[1] + + pooled_output = self.dropout(pooled_output) + logits = self.classifier(pooled_output) + reshaped_logits = logits.view(-1, num_choices) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(reshaped_logits, labels) + + if not return_dict: + output = (reshaped_logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return MultipleChoiceModelOutput( + loss=loss, + logits=reshaped_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) @add_start_docstrings( @@ -121,13 +1224,85 @@ class CamembertForMultipleChoice(RobertaForMultipleChoice): """, CAMEMBERT_START_DOCSTRING, ) -class CamembertForTokenClassification(RobertaForTokenClassification): - """ - This class overrides [`RobertaForTokenClassification`]. Please check the superclass for the appropriate - documentation alongside usage examples. - """ +# Copied from transformers.models.roberta.modeling_roberta.RobertaForTokenClassification with Roberta->Camembert, ROBERTA->CAMEMBERT +class CamembertForTokenClassification(CamembertPreTrainedModel): + _keys_to_ignore_on_load_unexpected = [r"pooler"] + _keys_to_ignore_on_load_missing = [r"position_ids"] - config_class = CamembertConfig + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.roberta = CamembertModel(config, add_pooling_layer=False) + classifier_dropout = ( + config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob + ) + self.dropout = nn.Dropout(classifier_dropout) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint="Jean-Baptiste/roberta-large-ner-english", + output_type=TokenClassifierOutput, + config_class=_CONFIG_FOR_DOC, + expected_output="['O', 'ORG', 'ORG', 'O', 'O', 'O', 'O', 'O', 'LOC', 'O', 'LOC', 'LOC']", + expected_loss=0.01, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.roberta( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + sequence_output = self.dropout(sequence_output) + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) @add_start_docstrings( @@ -137,22 +1312,272 @@ class CamembertForTokenClassification(RobertaForTokenClassification): """, CAMEMBERT_START_DOCSTRING, ) -class CamembertForQuestionAnswering(RobertaForQuestionAnswering): - """ - This class overrides [`RobertaForQuestionAnswering`]. Please check the superclass for the appropriate documentation - alongside usage examples. - """ +# Copied from transformers.models.roberta.modeling_roberta.RobertaForQuestionAnswering with Roberta->Camembert, ROBERTA->CAMEMBERT +class CamembertForQuestionAnswering(CamembertPreTrainedModel): + _keys_to_ignore_on_load_unexpected = [r"pooler"] + _keys_to_ignore_on_load_missing = [r"position_ids"] - config_class = CamembertConfig + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.roberta = CamembertModel(config, add_pooling_layer=False) + self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint="deepset/roberta-base-squad2", + output_type=QuestionAnsweringModelOutput, + config_class=_CONFIG_FOR_DOC, + expected_output="' puppet'", + expected_loss=0.86, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + start_positions: Optional[torch.LongTensor] = None, + end_positions: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]: + r""" + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.roberta( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1).contiguous() + end_logits = end_logits.squeeze(-1).contiguous() + + total_loss = None + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions = start_positions.clamp(0, ignored_index) + end_positions = end_positions.clamp(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if not return_dict: + output = (start_logits, end_logits) + outputs[2:] + return ((total_loss,) + output) if total_loss is not None else output + + return QuestionAnsweringModelOutput( + loss=total_loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) @add_start_docstrings( """CamemBERT Model with a `language modeling` head on top for CLM fine-tuning.""", CAMEMBERT_START_DOCSTRING ) -class CamembertForCausalLM(RobertaForCausalLM): - """ - This class overrides [`RobertaForCausalLM`]. Please check the superclass for the appropriate documentation - alongside usage examples. +# Copied from transformers.models.roberta.modeling_roberta.RobertaForCausalLM with Roberta->Camembert, ROBERTA->CAMEMBERT, roberta-base->camembert-base +class CamembertForCausalLM(CamembertPreTrainedModel): + _keys_to_ignore_on_save = [r"lm_head.decoder.weight", r"lm_head.decoder.bias"] + _keys_to_ignore_on_load_missing = [r"position_ids", r"lm_head.decoder.weight", r"lm_head.decoder.bias"] + _keys_to_ignore_on_load_unexpected = [r"pooler"] + + def __init__(self, config): + super().__init__(config) + + if not config.is_decoder: + logger.warning("If you want to use `CamembertLMHeadModel` as a standalone, add `is_decoder=True.`") + + self.roberta = CamembertModel(config, add_pooling_layer=False) + self.lm_head = CamembertLMHead(config) + + # The LM head weights require special treatment only when they are tied with the word embeddings + self.update_keys_to_ignore(config, ["lm_head.decoder.weight"]) + + # Initialize weights and apply final processing + self.post_init() + + def get_output_embeddings(self): + return self.lm_head.decoder + + def set_output_embeddings(self, new_embeddings): + self.lm_head.decoder = new_embeddings + + @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + past_key_values: Tuple[Tuple[torch.FloatTensor]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: + r""" + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in + `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are + ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + + Returns: + + Example: + + ```python + >>> from transformers import CamembertTokenizer, CamembertForCausalLM, CamembertConfig + >>> import torch + + >>> tokenizer = CamembertTokenizer.from_pretrained("camembert-base") + >>> config = CamembertConfig.from_pretrained("camembert-base") + >>> config.is_decoder = True + >>> model = CamembertForCausalLM.from_pretrained("camembert-base", config=config) + + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + + >>> prediction_logits = outputs.logits + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + if labels is not None: + use_cache = False + + outputs = self.roberta( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + prediction_scores = self.lm_head(sequence_output) + + lm_loss = None + if labels is not None: + # we are doing next-token prediction; shift prediction scores and input ids by one + shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() + labels = labels[:, 1:].contiguous() + loss_fct = CrossEntropyLoss() + lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((lm_loss,) + output) if lm_loss is not None else output + + return CausalLMOutputWithCrossAttentions( + loss=lm_loss, + logits=prediction_scores, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs): + input_shape = input_ids.shape + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_shape) + + # cut decoder_input_ids if past is used + if past is not None: + input_ids = input_ids[:, -1:] + + return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past} + + def _reorder_cache(self, past, beam_idx): + reordered_past = () + for layer_past in past: + reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) + return reordered_past + + +# Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids +def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): """ + Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols + are ignored. This is modified from fairseq's `utils.make_positions`. - config_class = CamembertConfig + Args: + x: torch.Tensor x: + + Returns: torch.Tensor + """ + # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. + mask = input_ids.ne(padding_idx).int() + incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask + return incremental_indices.long() + padding_idx diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index f3583e1b61efdc..d1addf8d4c1c6d 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -1093,6 +1093,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class CamembertPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + CANINE_PRETRAINED_MODEL_ARCHIVE_LIST = None From 2f53ab5745cee94e7d8bb2c8a097f4f2edef15ed Mon Sep 17 00:00:00 2001 From: r-terada Date: Thu, 6 Oct 2022 00:41:37 +0900 Subject: [PATCH 467/539] Add sudachi and jumanpp tokenizers for bert_japanese (#19043) * add sudachipy and jumanpp tokenizers for bert_japanese * use ImportError instead of ModuleNotFoundError in SudachiTokenizer and JumanppTokenizer * put test cases of test_tokenization_bert_japanese in one line * add require_sudachi and require_jumanpp decorator for testing * add sudachi and pyknp(jumanpp) to dependencies * remove sudachi_dict_small and sudachi_dict_full from dependencies * empty commit for ci --- .circleci/config.yml | 10 + setup.py | 5 +- src/transformers/dependency_versions_table.py | 3 + .../tokenization_bert_japanese.py | 187 +++++++++++++++++- src/transformers/testing_utils.py | 16 ++ src/transformers/utils/__init__.py | 2 + src/transformers/utils/import_utils.py | 9 + .../test_tokenization_bert_japanese.py | 148 +++++++++++++- 8 files changed, 373 insertions(+), 7 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 06c621621f6a2d..aef10586cdc56b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -409,6 +409,16 @@ jobs: keys: - v0.5-custom_tokenizers-{{ checksum "setup.py" }} - v0.5-custom_tokenizers- + - run: sudo apt-get -y update && sudo apt-get install -y cmake + - run: + name: install jumanpp + command: | + wget https://github.com/ku-nlp/jumanpp/releases/download/v2.0.0-rc3/jumanpp-2.0.0-rc3.tar.xz + tar xvf jumanpp-2.0.0-rc3.tar.xz + mkdir jumanpp-2.0.0-rc3/bld + cd jumanpp-2.0.0-rc3/bld + sudo cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local + sudo make install - run: pip install --upgrade pip - run: pip install .[ja,testing,sentencepiece,jieba,spacy,ftfy,rjieba] - run: python -m unidic download diff --git a/setup.py b/setup.py index b82de85b9b2cd1..84c5df4793ef02 100644 --- a/setup.py +++ b/setup.py @@ -170,6 +170,9 @@ "unidic_lite>=1.0.7", "uvicorn", "beautifulsoup4", + "sudachipy>=0.6.6", + "sudachidict_core>=20220729", + "pyknp>=0.6.1", ] @@ -239,7 +242,7 @@ def run(self): extras = {} -extras["ja"] = deps_list("fugashi", "ipadic", "unidic_lite", "unidic") +extras["ja"] = deps_list("fugashi", "ipadic", "unidic_lite", "unidic", "sudachipy", "sudachidict_core", "pyknp") extras["sklearn"] = deps_list("scikit-learn") extras["tf"] = deps_list("tensorflow", "onnxconverter-common", "tf2onnx", "tensorflow-text") diff --git a/src/transformers/dependency_versions_table.py b/src/transformers/dependency_versions_table.py index d8d2fce767a570..48a803fcdfb267 100644 --- a/src/transformers/dependency_versions_table.py +++ b/src/transformers/dependency_versions_table.py @@ -76,4 +76,7 @@ "unidic_lite": "unidic_lite>=1.0.7", "uvicorn": "uvicorn", "beautifulsoup4": "beautifulsoup4", + "sudachipy": "sudachipy>=0.6.6", + "sudachidict_core": "sudachidict_core>=20220729", + "pyknp": "pyknp>=0.6.1", } diff --git a/src/transformers/models/bert_japanese/tokenization_bert_japanese.py b/src/transformers/models/bert_japanese/tokenization_bert_japanese.py index 0b33e858a105c8..0b6ccab3c495b1 100644 --- a/src/transformers/models/bert_japanese/tokenization_bert_japanese.py +++ b/src/transformers/models/bert_japanese/tokenization_bert_japanese.py @@ -77,7 +77,7 @@ class BertJapaneseTokenizer(BertTokenizer): r""" - Construct a BERT tokenizer for Japanese text, based on a MecabTokenizer. + Construct a BERT tokenizer for Japanese text. Args: vocab_file (`str`): @@ -89,11 +89,15 @@ class BertJapaneseTokenizer(BertTokenizer): do_subword_tokenize (`bool`, *optional*, defaults to `True`): Whether to do subword tokenization. word_tokenizer_type (`str`, *optional*, defaults to `"basic"`): - Type of word tokenizer. + Type of word tokenizer. Choose from ["basic", "mecab", "sudachi", "jumanpp"]. subword_tokenizer_type (`str`, *optional*, defaults to `"wordpiece"`): - Type of subword tokenizer. - mecab_kwargs (`str`, *optional*): + Type of subword tokenizer. Choose from ["wordpiece", "character"]. + mecab_kwargs (`dict`, *optional*): Dictionary passed to the `MecabTokenizer` constructor. + sudachi_kwargs (`dict`, *optional*): + Dictionary passed to the `SudachiTokenizer` constructor. + jumanpp_kwargs (`dict`, *optional*): + Dictionary passed to the `JumanppTokenizer` constructor. """ vocab_files_names = VOCAB_FILES_NAMES @@ -116,6 +120,8 @@ def __init__( cls_token="[CLS]", mask_token="[MASK]", mecab_kwargs=None, + sudachi_kwargs=None, + jumanpp_kwargs=None, **kwargs ): super(BertTokenizer, self).__init__( @@ -131,6 +137,8 @@ def __init__( subword_tokenizer_type=subword_tokenizer_type, never_split=never_split, mecab_kwargs=mecab_kwargs, + sudachi_kwargs=sudachi_kwargs, + jumanpp_kwargs=jumanpp_kwargs, **kwargs, ) # ^^ We call the grandparent's init, not the parent's. @@ -148,6 +156,8 @@ def __init__( self.lower_case = do_lower_case self.never_split = never_split self.mecab_kwargs = copy.deepcopy(mecab_kwargs) + self.sudachi_kwargs = copy.deepcopy(sudachi_kwargs) + self.jumanpp_kwargs = copy.deepcopy(jumanpp_kwargs) if do_word_tokenize: if word_tokenizer_type == "basic": self.word_tokenizer = BasicTokenizer( @@ -157,6 +167,14 @@ def __init__( self.word_tokenizer = MecabTokenizer( do_lower_case=do_lower_case, never_split=never_split, **(mecab_kwargs or {}) ) + elif word_tokenizer_type == "sudachi": + self.word_tokenizer = SudachiTokenizer( + do_lower_case=do_lower_case, never_split=never_split, **(sudachi_kwargs or {}) + ) + elif word_tokenizer_type == "jumanpp": + self.word_tokenizer = JumanppTokenizer( + do_lower_case=do_lower_case, never_split=never_split, **(jumanpp_kwargs or {}) + ) else: raise ValueError(f"Invalid word_tokenizer_type '{word_tokenizer_type}' is specified.") @@ -176,7 +194,7 @@ def do_lower_case(self): def __getstate__(self): state = dict(self.__dict__) - if self.word_tokenizer_type == "mecab": + if self.word_tokenizer_type in ["mecab", "sudachi", "jumanpp"]: del state["word_tokenizer"] return state @@ -186,6 +204,14 @@ def __setstate__(self, state): self.word_tokenizer = MecabTokenizer( do_lower_case=self.do_lower_case, never_split=self.never_split, **(self.mecab_kwargs or {}) ) + elif self.word_tokenizer_type == "sudachi": + self.word_tokenizer = SudachiTokenizer( + do_lower_case=self.do_lower_case, never_split=self.never_split, **(self.sudachi_kwargs or {}) + ) + elif self.word_tokenizer_type == "jumanpp": + self.word_tokenizer = JumanppTokenizer( + do_lower_case=self.do_lower_case, never_split=self.never_split, **(self.jumanpp_kwargs or {}) + ) def _tokenize(self, text): if self.do_word_tokenize: @@ -309,6 +335,157 @@ def tokenize(self, text, never_split=None, **kwargs): return tokens +class SudachiTokenizer: + """Runs basic tokenization with Sudachi morphological parser.""" + + def __init__( + self, + do_lower_case=False, + never_split=None, + normalize_text=True, + trim_whitespace=False, + sudachi_split_mode="A", + sudachi_config_path=None, + sudachi_resource_dir=None, + sudachi_dict_type="core", + ): + """ + Constructs a SudachiTokenizer. + + Args: + **do_lower_case**: (*optional*) boolean (default True) + Whether to lowercase the input. + **never_split**: (*optional*) list of str + Kept for backward compatibility purposes. Now implemented directly at the base class level (see + [`PreTrainedTokenizer.tokenize`]) List of tokens not to split. + **normalize_text**: (*optional*) boolean (default True) + Whether to apply unicode normalization to text before tokenization. + **trim_whitespace**: (*optional*) boolean (default False) + Whether to trim all whitespace, tab, newline from tokens. + **sudachi_split_mode**: (*optional*) string + Split mode of sudachi, choose from "A", "B", "C". + **sudachi_config_path**: (*optional*) string + **sudachi_resource_dir**: (*optional*) string + **sudachi_dict_type**: (*optional*) string + dict type of sudachi, choose from "small", "core", "full". + """ + + self.do_lower_case = do_lower_case + self.never_split = never_split if never_split is not None else [] + self.normalize_text = normalize_text + self.trim_whitespace = trim_whitespace + + try: + from sudachipy import dictionary, tokenizer + except ImportError: + raise ImportError( + "You need to install sudachipy to use SudachiTokenizer. " + "See https://github.com/WorksApplications/SudachiPy for installation." + ) + + if sudachi_split_mode == "A": + self.split_mode = tokenizer.Tokenizer.SplitMode.A + elif sudachi_split_mode == "B": + self.split_mode = tokenizer.Tokenizer.SplitMode.B + elif sudachi_split_mode == "C": + self.split_mode = tokenizer.Tokenizer.SplitMode.C + else: + raise ValueError("Invalid sudachi_split_mode is specified.") + + self.sudachi = dictionary.Dictionary( + config_path=sudachi_config_path, resource_dir=sudachi_resource_dir, dict_type=sudachi_dict_type + ).create(self.split_mode) + + def tokenize(self, text, never_split=None, **kwargs): + """Tokenizes a piece of text.""" + if self.normalize_text: + text = unicodedata.normalize("NFKC", text) + + never_split = self.never_split + (never_split if never_split is not None else []) + tokens = [] + + for word in self.sudachi.tokenize(text): + token = word.surface() + + if self.do_lower_case and token not in never_split: + token = token.lower() + + if self.trim_whitespace: + if token.strip() == "": + continue + else: + token = token.strip() + + tokens.append(token) + + return tokens + + +class JumanppTokenizer: + """Runs basic tokenization with jumanpp morphological parser.""" + + def __init__( + self, + do_lower_case=False, + never_split=None, + normalize_text=True, + trim_whitespace=False, + ): + """ + Constructs a JumanppTokenizer. + + Args: + **do_lower_case**: (*optional*) boolean (default True) + Whether to lowercase the input. + **never_split**: (*optional*) list of str + Kept for backward compatibility purposes. Now implemented directly at the base class level (see + [`PreTrainedTokenizer.tokenize`]) List of tokens not to split. + **normalize_text**: (*optional*) boolean (default True) + Whether to apply unicode normalization to text before tokenization. + **trim_whitespace**: (*optional*) boolean (default False) + Whether to trim all whitespace, tab, newline from tokens. + """ + + self.do_lower_case = do_lower_case + self.never_split = never_split if never_split is not None else [] + self.normalize_text = normalize_text + self.trim_whitespace = trim_whitespace + + try: + import pyknp + except ImportError: + raise ImportError( + "You need to install pyknp to use JumanppTokenizer. " + "See https://github.com/ku-nlp/pyknp for installation." + ) + + self.juman = pyknp.Juman(jumanpp=True) + + def tokenize(self, text, never_split=None, **kwargs): + """Tokenizes a piece of text.""" + if self.normalize_text: + text = unicodedata.normalize("NFKC", text) + + never_split = self.never_split + (never_split if never_split is not None else []) + tokens = [] + + for mrph in self.juman.analysis(text).mrph_list(): + token = mrph.midasi + + if self.do_lower_case and token not in never_split: + token = token.lower() + + if self.trim_whitespace: + if token.strip() == "": + continue + else: + token = token.strip() + + tokens.append(token) + + return tokens + + class CharacterTokenizer: """Runs Character tokenization.""" diff --git a/src/transformers/testing_utils.py b/src/transformers/testing_utils.py index 7e3242e94c945c..08409b6e092d33 100644 --- a/src/transformers/testing_utils.py +++ b/src/transformers/testing_utils.py @@ -52,6 +52,7 @@ is_flax_available, is_ftfy_available, is_ipex_available, + is_jumanpp_available, is_librosa_available, is_onnx_available, is_pandas_available, @@ -66,6 +67,7 @@ is_sentencepiece_available, is_soundfile_availble, is_spacy_available, + is_sudachi_available, is_tensorflow_probability_available, is_tensorflow_text_available, is_tf2onnx_available, @@ -671,6 +673,20 @@ def require_usr_bin_time(test_case): return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case) +def require_sudachi(test_case): + """ + Decorator marking a test that requires sudachi + """ + return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case) + + +def require_jumanpp(test_case): + """ + Decorator marking a test that requires jumanpp + """ + return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case) + + def get_gpu_count(): """ Return the number of available gpus (regardless of whether torch, tf or jax is used) diff --git a/src/transformers/utils/__init__.py b/src/transformers/utils/__init__.py index fdd1c376dabdbf..24d32d0a012278 100644 --- a/src/transformers/utils/__init__.py +++ b/src/transformers/utils/__init__.py @@ -98,6 +98,7 @@ is_ftfy_available, is_in_notebook, is_ipex_available, + is_jumanpp_available, is_librosa_available, is_ninja_available, is_onnx_available, @@ -121,6 +122,7 @@ is_soundfile_availble, is_spacy_available, is_speech_available, + is_sudachi_available, is_tensorflow_probability_available, is_tensorflow_text_available, is_tf2onnx_available, diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index 81b7c478c1b2aa..2fc52b52a2025f 100644 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -18,6 +18,7 @@ import importlib.util import json import os +import shutil import sys import warnings from collections import OrderedDict @@ -671,6 +672,14 @@ def is_ccl_available(): return _is_ccl_available +def is_sudachi_available(): + return importlib.util.find_spec("sudachipy") is not None + + +def is_jumanpp_available(): + return (importlib.util.find_spec("pyknp") is not None) and (shutil.which("jumanpp") is not None) + + # docstyle-ignore DATASETS_IMPORT_ERROR = """ {0} requires the 🤗 Datasets library but it was not found in your environment. You can install it with: diff --git a/tests/models/bert_japanese/test_tokenization_bert_japanese.py b/tests/models/bert_japanese/test_tokenization_bert_japanese.py index 86b3f16f101e03..9aba5c3705a8a5 100644 --- a/tests/models/bert_japanese/test_tokenization_bert_japanese.py +++ b/tests/models/bert_japanese/test_tokenization_bert_japanese.py @@ -24,10 +24,12 @@ BertJapaneseTokenizer, BertTokenizer, CharacterTokenizer, + JumanppTokenizer, MecabTokenizer, + SudachiTokenizer, WordpieceTokenizer, ) -from transformers.testing_utils import custom_tokenizers +from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @@ -172,6 +174,150 @@ def test_mecab_tokenizer_no_normalize(self): ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"], ) + @require_sudachi + def test_pickle_sudachi_tokenizer(self): + tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi") + self.assertIsNotNone(tokenizer) + + text = "こんにちは、世界。\nこんばんは、世界。" + tokens = tokenizer.tokenize(text) + self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"]) + self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14]) + + filename = os.path.join(self.tmpdirname, "tokenizer.bin") + with open(filename, "wb") as handle: + pickle.dump(tokenizer, handle) + + with open(filename, "rb") as handle: + tokenizer_new = pickle.load(handle) + + tokens_loaded = tokenizer_new.tokenize(text) + + self.assertListEqual(tokens, tokens_loaded) + + @require_sudachi + def test_sudachi_tokenizer_core(self): + tokenizer = SudachiTokenizer(sudachi_dict_type="core") + + self.assertListEqual( + tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), + # fmt: off + [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "], + # fmt: on + ) + + @require_sudachi + def test_sudachi_tokenizer_split_mode_A(self): + tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A") + + self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"]) + + @require_sudachi + def test_sudachi_tokenizer_split_mode_B(self): + tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B") + + self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"]) + + @require_sudachi + def test_sudachi_tokenizer_split_mode_C(self): + tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C") + + self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"]) + + @require_sudachi + def test_sudachi_tokenizer_lower(self): + tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core") + + self.assertListEqual( + tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), + # fmt: off + [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "], + # fmt: on + ) + + @require_sudachi + def test_sudachi_tokenizer_no_normalize(self): + tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core") + + self.assertListEqual( + tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), + # fmt: off + [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "], + # fmt: on + ) + + @require_sudachi + def test_sudachi_tokenizer_trim_whitespace(self): + tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core") + + self.assertListEqual( + tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), + ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"], + ) + + @require_jumanpp + def test_pickle_jumanpp_tokenizer(self): + tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp") + self.assertIsNotNone(tokenizer) + + text = "こんにちは、世界。\nこんばんは、世界。" + tokens = tokenizer.tokenize(text) + self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"]) + self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14]) + + filename = os.path.join(self.tmpdirname, "tokenizer.bin") + with open(filename, "wb") as handle: + pickle.dump(tokenizer, handle) + + with open(filename, "rb") as handle: + tokenizer_new = pickle.load(handle) + + tokens_loaded = tokenizer_new.tokenize(text) + + self.assertListEqual(tokens, tokens_loaded) + + @require_jumanpp + def test_jumanpp_tokenizer(self): + tokenizer = JumanppTokenizer() + + self.assertListEqual( + tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), + # fmt: off + ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"], + # fmt: on + ) + + @require_jumanpp + def test_jumanpp_tokenizer_lower(self): + tokenizer = JumanppTokenizer(do_lower_case=True) + + self.assertListEqual( + tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), + # fmt: off + ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"], + # fmt: on + ) + + @require_jumanpp + def test_jumanpp_tokenizer_no_normalize(self): + tokenizer = JumanppTokenizer(normalize_text=False) + + self.assertListEqual( + tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), + # fmt: off + ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"], + # fmt: on + ) + + @require_jumanpp + def test_jumanpp_tokenizer_trim_whitespace(self): + tokenizer = JumanppTokenizer(trim_whitespace=True) + + self.assertListEqual( + tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "), + ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"], + ) + def test_wordpiece_tokenizer(self): vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"] From e794ca5b165801eed164a49bbd3c3f46ec147add Mon Sep 17 00:00:00 2001 From: Harsha Date: Wed, 5 Oct 2022 08:49:14 -0700 Subject: [PATCH 468/539] Frees LongformerTokenizer of the Roberta dependency (#19346) * copies over roberta tokenizer to longformertokenizer since they are both identical * adds Copied from patterns to pass copy check --- .../longformer/tokenization_longformer.py | 368 +++++++++++++++++- 1 file changed, 362 insertions(+), 6 deletions(-) diff --git a/src/transformers/models/longformer/tokenization_longformer.py b/src/transformers/models/longformer/tokenization_longformer.py index b594580647a228..64bbeeb8ce51ae 100644 --- a/src/transformers/models/longformer/tokenization_longformer.py +++ b/src/transformers/models/longformer/tokenization_longformer.py @@ -13,8 +13,15 @@ # See the License for the specific language governing permissions and # limitations under the License. +import json +import os +from functools import lru_cache +from typing import List, Optional, Tuple + +import regex as re + +from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging -from ..roberta.tokenization_roberta import RobertaTokenizer logger = logging.get_logger(__name__) @@ -64,13 +71,362 @@ } -class LongformerTokenizer(RobertaTokenizer): - r""" - Construct a Longformer tokenizer. +@lru_cache() +# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control + characters the bpe code barfs on. + + The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab + if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for + decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup + tables between utf-8 bytes and unicode strings. + """ + bs = ( + list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) + ) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8 + n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +# Copied from transformers.models.roberta.tokenization_roberta.get_pairs +def get_pairs(word): + """ + Return set of symbol pairs in a word. + + Word is represented as tuple of symbols (symbols being variable-length strings). + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + - [`LongformerTokenizer`] is identical to [`RobertaTokenizer`]. Refer to the superclass for usage examples and - documentation concerning parameters. +# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer with roberta-base->allenai/longformer-base-4096, RoBERTa->Longformer all-casing, RobertaTokenizer->LongformerTokenizer +class LongformerTokenizer(PreTrainedTokenizer): """ + Constructs a Longformer tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding. + + This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will + be encoded differently whether it is at the beginning of the sentence (without space) or not: + + ``` + >>> from transformers import LongformerTokenizer + >>> tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096") + >>> tokenizer("Hello world")['input_ids'] + [0, 31414, 232, 328, 2] + >>> tokenizer(" Hello world")['input_ids'] + [0, 20920, 232, 2] + ``` + + You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you + call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. + + + + When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one). + + + + This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to + this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + Path to the vocabulary file. + merges_file (`str`): + Path to the merges file. + errors (`str`, *optional*, defaults to `"replace"`): + Paradigm to follow when decoding bytes to UTF-8. See + [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. + bos_token (`str`, *optional*, defaults to `""`): + The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. + + + + When building a sequence using special tokens, this is not the token that is used for the beginning of + sequence. The token used is the `cls_token`. + + + + eos_token (`str`, *optional*, defaults to `""`): + The end of sequence token. + + + + When building a sequence using special tokens, this is not the token that is used for the end of sequence. + The token used is the `sep_token`. + + + + sep_token (`str`, *optional*, defaults to `""`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + cls_token (`str`, *optional*, defaults to `""`): + The classifier token which is used when doing sequence classification (classification of the whole sequence + instead of per-token classification). It is the first token of the sequence when built with special tokens. + unk_token (`str`, *optional*, defaults to `""`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + pad_token (`str`, *optional*, defaults to `""`): + The token used for padding, for example when batching sequences of different lengths. + mask_token (`str`, *optional*, defaults to `""`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. + add_prefix_space (`bool`, *optional*, defaults to `False`): + Whether or not to add an initial space to the input. This allows to treat the leading word just as any + other word. (Longformer tokenizer detect beginning of words by the preceding space). + """ + vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + model_input_names = ["input_ids", "attention_mask"] + + def __init__( + self, + vocab_file, + merges_file, + errors="replace", + bos_token="", + eos_token="", + sep_token="", + cls_token="", + unk_token="", + pad_token="", + mask_token="", + add_prefix_space=False, + **kwargs + ): + bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token + eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token + sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token + cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token + unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token + pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token + + # Mask token behave like a normal word, i.e. include the space before it + mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token + + super().__init__( + errors=errors, + bos_token=bos_token, + eos_token=eos_token, + unk_token=unk_token, + sep_token=sep_token, + cls_token=cls_token, + pad_token=pad_token, + mask_token=mask_token, + add_prefix_space=add_prefix_space, + **kwargs, + ) + + with open(vocab_file, encoding="utf-8") as vocab_handle: + self.encoder = json.load(vocab_handle) + self.decoder = {v: k for k, v in self.encoder.items()} + self.errors = errors # how to handle errors in decoding + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + with open(merges_file, encoding="utf-8") as merges_handle: + bpe_merges = merges_handle.read().split("\n")[1:-1] + bpe_merges = [tuple(merge.split()) for merge in bpe_merges] + self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) + self.cache = {} + self.add_prefix_space = add_prefix_space + + # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions + self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") + + @property + def vocab_size(self): + return len(self.encoder) + + def get_vocab(self): + return dict(self.encoder, **self.added_tokens_encoder) + + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token) + pairs = get_pairs(word) + + if not pairs: + return token + + while True: + bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + except ValueError: + new_word.extend(word[i:]) + break + else: + new_word.extend(word[i:j]) + i = j + + if word[i] == first and i < len(word) - 1 and word[i + 1] == second: + new_word.append(first + second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = " ".join(word) + self.cache[token] = word + return word + + def _tokenize(self, text): + """Tokenize a string.""" + bpe_tokens = [] + for token in re.findall(self.pat, text): + token = "".join( + self.byte_encoder[b] for b in token.encode("utf-8") + ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) + bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" ")) + return bpe_tokens + + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.encoder.get(token, self.encoder.get(self.unk_token)) + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.decoder.get(index) + + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + text = "".join(tokens) + text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors) + return text + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + if not os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + merge_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] + ) + + with open(vocab_file, "w", encoding="utf-8") as f: + f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") + + index = 0 + with open(merge_file, "w", encoding="utf-8") as writer: + writer.write("#version: 0.2\n") + for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): + if index != token_index: + logger.warning( + f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." + " Please check that the tokenizer is not corrupted!" + ) + index = token_index + writer.write(" ".join(bpe_tokens) + "\n") + index += 1 + + return vocab_file, merge_file + + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A Longformer sequence has the following format: + + - single sequence: ` X ` + - pair of sequences: ` A B ` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. + """ + if token_ids_1 is None: + return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + cls = [self.cls_token_id] + sep = [self.sep_token_id] + return cls + token_ids_0 + sep + sep + token_ids_1 + sep + + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + if token_ids_1 is None: + return [1] + ([0] * len(token_ids_0)) + [1] + return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. Longformer does + not make use of token type ids, therefore a list of zeros is returned. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of zeros. + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] + + def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): + add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space) + if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()): + text = " " + text + return (text, kwargs) From 4cbc797b27c461be3c9c2ae3d95ec47fb898e562 Mon Sep 17 00:00:00 2001 From: Younes Belkada <49240599+younesbelkada@users.noreply.github.com> Date: Wed, 5 Oct 2022 18:12:13 +0200 Subject: [PATCH 469/539] Change `BloomConfig` docstring (#19336) * change `BloomConfig` docstring - slightly change the docstring of the `BloomConfig` - Use correct default vocab size - Use correct default `hidden_dim`, `n_head` * Update src/transformers/models/bloom/configuration_bloom.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/bloom/configuration_bloom.py Co-authored-by: SaulLu <55560583+SaulLu@users.noreply.github.com> * make style Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: SaulLu <55560583+SaulLu@users.noreply.github.com> --- .../models/bloom/configuration_bloom.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/transformers/models/bloom/configuration_bloom.py b/src/transformers/models/bloom/configuration_bloom.py index 1103a8148ae1e3..4f973d93ae48ae 100644 --- a/src/transformers/models/bloom/configuration_bloom.py +++ b/src/transformers/models/bloom/configuration_bloom.py @@ -53,14 +53,16 @@ class BloomConfig(PretrainedConfig): Args: - vocab_size (`int`, *optional*, defaults to 50257): - Vocabulary size of the Bloom model. Defines the number of different tokens that can be represented by the - `inputs_ids` passed when calling [`BloomModel`]. - hidden_size (`int`, *optional*, defaults to 768): + vocab_size (`int`, *optional*, defaults to 250880): + Vocabulary size of the Bloom model. Defines the maximum number of different tokens that can be represented + by the `inputs_ids` passed when calling [`BloomModel`]. Check [this + discussion](https://huggingface.co/bigscience/bloom/discussions/120#633d28389addb8530b406c2a) on how the + `vocab_size` has been defined. + hidden_size (`int`, *optional*, defaults to 64): Dimensionality of the embeddings and hidden states. - n_layer (`int`, *optional*, defaults to 12): + n_layer (`int`, *optional*, defaults to 2): Number of hidden layers in the Transformer encoder. - n_head (`int`, *optional*, defaults to 12): + n_head (`int`, *optional*, defaults to 8): Number of attention heads for each attention layer in the Transformer encoder. layer_norm_epsilon (`float`, *optional*, defaults to 1e-5): The epsilon to use in the layer normalization layers. From c875a96eb154f2ea4a831a48b47c37c1c3da99e7 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Wed, 5 Oct 2022 12:23:48 -0400 Subject: [PATCH 470/539] Test failing test while we resolve the issue. (#19355) --- tests/models/maskformer/test_feature_extraction_maskformer.py | 1 + tests/pipelines/test_pipelines_image_segmentation.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/models/maskformer/test_feature_extraction_maskformer.py b/tests/models/maskformer/test_feature_extraction_maskformer.py index 41ff2d10aa524a..694d272f32e17f 100644 --- a/tests/models/maskformer/test_feature_extraction_maskformer.py +++ b/tests/models/maskformer/test_feature_extraction_maskformer.py @@ -399,6 +399,7 @@ def test_post_process_semantic_segmentation(self): self.assertEqual(segmentation[0].shape, target_sizes[0]) + @unittest.skip("Fix me Alara!") def test_post_process_panoptic_segmentation(self): fature_extractor = self.feature_extraction_class(num_labels=self.feature_extract_tester.num_classes) outputs = self.feature_extract_tester.get_fake_maskformer_outputs() diff --git a/tests/pipelines/test_pipelines_image_segmentation.py b/tests/pipelines/test_pipelines_image_segmentation.py index f35c6640c587e7..3841bc1ab78764 100644 --- a/tests/pipelines/test_pipelines_image_segmentation.py +++ b/tests/pipelines/test_pipelines_image_segmentation.py @@ -75,7 +75,7 @@ class ImageSegmentationPipelineTests(unittest.TestCase, metaclass=PipelineTestCa def get_test_pipeline(self, model, tokenizer, feature_extractor): # Fix me Alara - if model.__class__.__name__ == "DetrForSegmentation": + if model.__class__.__name__ in ["DetrForSegmentation", "MaskFormerForInstanceSegmentation"]: return None, None image_segmenter = ImageSegmentationPipeline(model=model, feature_extractor=feature_extractor) return image_segmenter, [ From 071df6eb1333d2525ffbda81f292e199393598e4 Mon Sep 17 00:00:00 2001 From: Matt Date: Wed, 5 Oct 2022 18:03:49 +0100 Subject: [PATCH 471/539] Call _set_save_spec() when creating TF models (#19321) * Add a build_from_serving_sig_and_dummies method and replace all calls like model(model.dummy_inputs) with it. * make fixup * Remove the overridden save() as this is no longer necessary * Also call _set_save_spec(), the last missing piece * Ensure we set the save spec when loading from config too * Turn this whole thing into a one-line PR * Turn this whole thing into a one-line PR * Turn this whole thing into a one-line PR Co-authored-by: Your Name --- src/transformers/modeling_tf_utils.py | 25 ++----------------------- 1 file changed, 2 insertions(+), 23 deletions(-) diff --git a/src/transformers/modeling_tf_utils.py b/src/transformers/modeling_tf_utils.py index 8bccea12b33f0c..1f341fa053cfad 100644 --- a/src/transformers/modeling_tf_utils.py +++ b/src/transformers/modeling_tf_utils.py @@ -1049,6 +1049,8 @@ def __init__(self, config, *inputs, **kwargs): # Save config and origin of the pretrained weights if given in model self.config = config self.name_or_path = config.name_or_path + # Set the serving spec quickly to ensure that Keras doesn't use the specific dummy input shapes as the spec + self._set_save_spec(self.serving.input_signature[0]) def get_config(self): return self.config.to_dict() @@ -1097,29 +1099,6 @@ def serving_output(output): """ raise NotImplementedError - def save( - self, - filepath, - overwrite=True, - include_optimizer=True, - save_format=None, - signatures=None, - options=None, - save_traces=True, - ): - # Very simple wrapper that ensures we set the correct serving signature when saving - if signatures is None and hasattr(self, "serving"): - signatures = self.serving - super().save( - filepath, - overwrite=overwrite, - include_optimizer=include_optimizer, - save_format=save_format, - signatures=signatures, - options=options, - save_traces=save_traces, - ) - def get_input_embeddings(self) -> tf.keras.layers.Layer: """ Returns the model's input embeddings layer. From 226b8ef063e210794d177cdf8e1cbf2a302c6d08 Mon Sep 17 00:00:00 2001 From: Paula Isabel <102936794+paulaxisabel@users.noreply.github.com> Date: Thu, 6 Oct 2022 01:40:38 +0800 Subject: [PATCH 472/539] correct typos in README (#19304) --- docs/README.md | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/docs/README.md b/docs/README.md index 9edce16782f04d..e58b1d4830f6b1 100644 --- a/docs/README.md +++ b/docs/README.md @@ -16,7 +16,7 @@ limitations under the License. # Generating the documentation -To generate the documentation, you first have to build it. Several packages are necessary to build the doc, +To generate the documentation, you first have to build it. Several packages are necessary to build the doc, you can install them with the following command, at the root of the code repository: ```bash @@ -33,7 +33,7 @@ pip install git+https://github.com/huggingface/doc-builder **NOTE** You only need to generate the documentation to inspect it locally (if you're planning changes and want to -check how they look like before committing for instance). You don't have to commit the built documentation. +check how they look before committing for instance). You don't have to commit the built documentation. --- @@ -88,7 +88,7 @@ the filename without the extension in the [`_toctree.yml`](https://github.com/hu ## Renaming section headers and moving sections -It helps to keep the old links working when renaming section header and/or moving sections from one document to another. This is because the old links are likely to be used in Issues, Forums and Social media and it'd be make for a much more superior user experience if users reading those months later could still easily navigate to the originally intended information. +It helps to keep the old links working when renaming the section header and/or moving sections from one document to another. This is because the old links are likely to be used in Issues, Forums, and Social media and it'd make for a much more superior user experience if users reading those months later could still easily navigate to the originally intended information. Therefore we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor. @@ -99,7 +99,7 @@ Sections that were moved: [ Section A ] ``` -and of course if you moved it to another file, then: +and of course, if you moved it to another file, then: ``` Sections that were moved: @@ -109,7 +109,7 @@ Sections that were moved: Use the relative style to link to the new file so that the versioned docs continue to work. -For an example of a rich moved sections set please see the very end of [the Trainer doc](https://github.com/huggingface/transformers/blob/main/docs/source/main_classes/trainer.mdx). +For an example of a rich moved section set please see the very end of [the Trainer doc](https://github.com/huggingface/transformers/blob/main/docs/source/en/main_classes/trainer.mdx). ## Writing Documentation - Specification @@ -126,7 +126,7 @@ Adding a new tutorial or section is done in two steps: - Link that file in `./source/_toctree.yml` on the correct toc-tree. Make sure to put your new file under the proper section. It's unlikely to go in the first section (*Get Started*), so -depending on the intended targets (beginners, more advanced users or researchers) it should go in section two, three or +depending on the intended targets (beginners, more advanced users, or researchers) it should go in sections two, three, or four. ### Translating @@ -177,8 +177,8 @@ not to be displayed in the documentation, you can do so by specifying which meth - save_vocabulary ``` -If you just want to add a method that is not documented (for instance magic method like `__call__` are not documented -byt default) you can put the list of methods to add in a list that contains `all`: +If you just want to add a method that is not documented (for instance magic methods like `__call__` are not documented +by default) you can put the list of methods to add in a list that contains `all`: ``` ## XXXTokenizer @@ -191,9 +191,9 @@ byt default) you can put the list of methods to add in a list that contains `all ### Writing source documentation Values that should be put in `code` should either be surrounded by backticks: \`like so\`. Note that argument names -and objects like True, None or any strings should usually be put in `code`. +and objects like True, None, or any strings should usually be put in `code`. -When mentioning a class, function or method, it is recommended to use our syntax for internal links so that our tool +When mentioning a class, function, or method, it is recommended to use our syntax for internal links so that our tool adds a link to its documentation with this syntax: \[\`XXXClass\`\] or \[\`function\`\]. This requires the class or function to be in the main package. @@ -207,7 +207,7 @@ The same works for methods so you can either use \[\`XXXClass.method\`\] or \[~\ #### Defining arguments in a method Arguments should be defined with the `Args:` (or `Arguments:` or `Parameters:`) prefix, followed by a line return and -an indentation. The argument should be followed by its type, with its shape if it is a tensor, a colon and its +an indentation. The argument should be followed by its type, with its shape if it is a tensor, a colon, and its description: ``` @@ -216,7 +216,7 @@ description: ``` If the description is too long to fit in one line, another indentation is necessary before writing the description -after th argument. +after the argument. Here's an example showcasing everything so far: @@ -266,7 +266,7 @@ Multi-line code blocks can be useful for displaying examples. They are done betw ```` We follow the [doctest](https://docs.python.org/3/library/doctest.html) syntax for the examples to automatically test -the results stay consistent with the library. +the results to stay consistent with the library. #### Writing a return block @@ -274,27 +274,27 @@ The return block should be introduced with the `Returns:` prefix, followed by a The first line should be the type of the return, followed by a line return. No need to indent further for the elements building the return. -Here's an example for a single value return: +Here's an example of a single value return: ``` Returns: `List[int]`: A list of integers in the range [0, 1] --- 1 for a special token, 0 for a sequence token. ``` -Here's an example for tuple return, comprising several objects: +Here's an example of a tuple return, comprising several objects: ``` Returns: `tuple(torch.FloatTensor)` comprising various elements depending on the configuration ([`BertConfig`]) and inputs: - ** loss** (*optional*, returned when `masked_lm_labels` is provided) `torch.FloatTensor` of shape `(1,)` -- - Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. + Total loss is the sum of the masked language modeling loss and the next sequence prediction (classification) loss. - **prediction_scores** (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`) -- Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). ``` #### Adding an image -Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos and other non-text files. We prefer to leverage a hf.co hosted `dataset` like +Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos, and other non-text files. We prefer to leverage a hf.co hosted `dataset` like the ones hosted on [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) in which to place these files and reference them by URL. We recommend putting them in the following dataset: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images). If an external contribution, feel free to add the images to your PR and ask a Hugging Face member to migrate your images @@ -364,7 +364,7 @@ We use pytests' [doctest integration](https://docs.pytest.org/doctest.html) to v For Transformers, the doctests are run on a daily basis via GitHub Actions as can be seen [here](https://github.com/huggingface/transformers/actions/workflows/doctests.yml). -To include your example in the daily doctests, you need add the filename that +To include your example in the daily doctests, you need to add the filename that contains the example docstring to the [documentation_tests.txt](../utils/documentation_tests.txt). ### For Python files @@ -426,6 +426,6 @@ Here are a few tips to help you debug the doctests and make them pass: - The outputs of the code need to match the expected output **exactly**, so make sure you have the same outputs. In particular doctest will see a difference between single quotes and double quotes, or a missing parenthesis. The only exceptions to that rule are: * whitespace: one give whitespace (space, tabulation, new line) is equivalent to any number of whitespace, so you can add new lines where there are spaces to make your output more readable. - * numerical values: you should never put more than 4 or 5 digits to expected results as different setups or library versions might get you slightly different results. `doctest` is configure to ignore any difference lower than the precision to which you wrote (so 1e-4 if you write 4 digits). + * numerical values: you should never put more than 4 or 5 digits to expected results as different setups or library versions might get you slightly different results. `doctest` is configured to ignore any difference lower than the precision to which you wrote (so 1e-4 if you write 4 digits). - Don't leave a block of code that is very long to execute. If you can't make it fast, you can either not use the doctest syntax on it (so that it's ignored), or if you want to use the doctest syntax to show the results, you can add a comment `# doctest: +SKIP` at the end of the lines of code too long to execute - Each line of code that produces a result needs to have that result written below. You can ignore an output if you don't want to show it in your code example by adding a comment ` # doctest: +IGNORE_RESULT` at the end of the line of code producing it. From d9101b71bcd2e2a1f26cf07057ffd15572997105 Mon Sep 17 00:00:00 2001 From: Harsha Date: Wed, 5 Oct 2022 10:50:15 -0700 Subject: [PATCH 473/539] Removes Roberta and Bert config dependencies from Longformer (#19343) * removes roberta and bert config dependencies from longformer * adds copied from statements * fixes style * removes excessive comments and replace bert with longformer in a couple places * fixes style --- .../longformer/configuration_longformer.py | 94 +++++++++++++++++-- 1 file changed, 87 insertions(+), 7 deletions(-) diff --git a/src/transformers/models/longformer/configuration_longformer.py b/src/transformers/models/longformer/configuration_longformer.py index 977ca3e639c50e..f9d9834c1ae2dc 100644 --- a/src/transformers/models/longformer/configuration_longformer.py +++ b/src/transformers/models/longformer/configuration_longformer.py @@ -16,13 +16,12 @@ from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union +from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import TensorType, logging -from ..roberta.configuration_roberta import RobertaConfig if TYPE_CHECKING: - from ...configuration_utils import PretrainedConfig from ...onnx.config import PatchingSpec from ...tokenization_utils_base import PreTrainedTokenizerBase @@ -44,7 +43,7 @@ } -class LongformerConfig(RobertaConfig): +class LongformerConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`LongformerModel`] or a [`TFLongformerModel`]. It is used to instantiate a Longformer model according to the specified arguments, defining the model architecture. @@ -55,10 +54,49 @@ class LongformerConfig(RobertaConfig): [allenai/longformer-base-4096](https://huggingface.co/allenai/longformer-base-4096) architecture with a sequence length 4,096. - The [`LongformerConfig`] class directly inherits [`RobertaConfig`]. It reuses the same defaults. Please check the - parent class for more information. + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. Args: + vocab_size (`int`, *optional*, defaults to 30522): + Vocabulary size of the Longformer model. Defines the number of different tokens that can be represented by + the `inputs_ids` passed when calling [`LongformerModel`] or [`TFLongformerModel`]. + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. + hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention probabilities. + max_position_embeddings (`int`, *optional*, defaults to 512): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + type_vocab_size (`int`, *optional*, defaults to 2): + The vocabulary size of the `token_type_ids` passed when calling [`LongformerModel`] or + [`TFLongformerModel`]. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the layer normalization layers. + position_embedding_type (`str`, *optional*, defaults to `"absolute"`): + Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For + positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to + [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). + For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models + with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + classifier_dropout (`float`, *optional*): + The dropout ratio for the classification head. attention_window (`int` or `List[int]`, *optional*, defaults to 512): Size of an attention window around each token. If an `int`, use the same size for all layers. To specify a different window size for each layer, use a `List[int]` where `len(attention_window) == num_hidden_layers`. @@ -80,10 +118,52 @@ class LongformerConfig(RobertaConfig): model_type = "longformer" def __init__( - self, attention_window: Union[List[int], int] = 512, sep_token_id: int = 2, onnx_export: bool = False, **kwargs + self, + attention_window: Union[List[int], int] = 512, + sep_token_id: int = 2, + pad_token_id: int = 1, + bos_token_id: int = 0, + eos_token_id: int = 2, + vocab_size: int = 30522, + hidden_size: int = 768, + num_hidden_layers: int = 12, + num_attention_heads: int = 12, + intermediate_size: int = 3072, + hidden_act: str = "gelu", + hidden_dropout_prob: float = 0.1, + attention_probs_dropout_prob: float = 0.1, + max_position_embeddings: int = 512, + type_vocab_size: int = 2, + initializer_range: float = 0.02, + layer_norm_eps: float = 1e-12, + position_embedding_type: str = "absolute", + use_cache: bool = True, + classifier_dropout: float = None, + onnx_export: bool = False, + **kwargs ): - super().__init__(sep_token_id=sep_token_id, **kwargs) + """Constructs LongformerConfig.""" + super().__init__(pad_token_id=pad_token_id, **kwargs) + self.attention_window = attention_window + self.sep_token_id = sep_token_id + self.bos_token_id = bos_token_id + self.eos_token_id = eos_token_id + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.position_embedding_type = position_embedding_type + self.use_cache = use_cache + self.classifier_dropout = classifier_dropout self.onnx_export = onnx_export From ad98642a82a4f157b05d7f4af25bec1ae74077ba Mon Sep 17 00:00:00 2001 From: Zachary Mueller Date: Wed, 5 Oct 2022 14:52:01 -0400 Subject: [PATCH 474/539] Fix gather for metrics (#19360) --- examples/pytorch/summarization/run_summarization_no_trainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/pytorch/summarization/run_summarization_no_trainer.py b/examples/pytorch/summarization/run_summarization_no_trainer.py index 594d468330e78a..9b8ee1a1dc876d 100644 --- a/examples/pytorch/summarization/run_summarization_no_trainer.py +++ b/examples/pytorch/summarization/run_summarization_no_trainer.py @@ -685,7 +685,7 @@ def postprocess_text(preds, labels): # If we did not pad to max length, we need to pad the labels too labels = accelerator.pad_across_processes(batch["labels"], dim=1, pad_index=tokenizer.pad_token_id) - generated_tokens, labels = accelerator.gather_for_metrics(generated_tokens, labels) + generated_tokens, labels = accelerator.gather_for_metrics((generated_tokens, labels)) generated_tokens = generated_tokens.cpu().numpy() labels = labels.cpu().numpy() From 7598791c092d49555ec2aae1c92cf08a2eadb9e9 Mon Sep 17 00:00:00 2001 From: Alara Dirik <8944735+alaradirik@users.noreply.github.com> Date: Wed, 5 Oct 2022 23:25:58 +0300 Subject: [PATCH 475/539] Fix MaskFormer failing postprocess tests (#19354) Ensures post_process_instance_segmentation and post_process_panoptic_segmentation methods return a tensor of shape (target_height, target_width) filled with -1 values if no segment with score > threshold is found. --- .../models/maskformer/feature_extraction_maskformer.py | 10 ++++++---- .../maskformer/test_feature_extraction_maskformer.py | 7 ++++--- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/src/transformers/models/maskformer/feature_extraction_maskformer.py b/src/transformers/models/maskformer/feature_extraction_maskformer.py index c86fce646b8ef8..1808c967e38407 100644 --- a/src/transformers/models/maskformer/feature_extraction_maskformer.py +++ b/src/transformers/models/maskformer/feature_extraction_maskformer.py @@ -772,8 +772,9 @@ def post_process_instance_segmentation( # No mask found if mask_probs_item.shape[0] <= 0: - segmentation = None - segments: List[Dict] = [] + height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] + segmentation = torch.zeros((height, width)) - 1 + results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item @@ -860,8 +861,9 @@ def post_process_panoptic_segmentation( # No mask found if mask_probs_item.shape[0] <= 0: - segmentation = None - segments: List[Dict] = [] + height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] + segmentation = torch.zeros((height, width)) - 1 + results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item diff --git a/tests/models/maskformer/test_feature_extraction_maskformer.py b/tests/models/maskformer/test_feature_extraction_maskformer.py index 694d272f32e17f..fbafa9af155c6e 100644 --- a/tests/models/maskformer/test_feature_extraction_maskformer.py +++ b/tests/models/maskformer/test_feature_extraction_maskformer.py @@ -401,10 +401,11 @@ def test_post_process_semantic_segmentation(self): @unittest.skip("Fix me Alara!") def test_post_process_panoptic_segmentation(self): - fature_extractor = self.feature_extraction_class(num_labels=self.feature_extract_tester.num_classes) + feature_extractor = self.feature_extraction_class(num_labels=self.feature_extract_tester.num_classes) outputs = self.feature_extract_tester.get_fake_maskformer_outputs() - segmentation = fature_extractor.post_process_panoptic_segmentation(outputs, threshold=0) - + segmentation = feature_extractor.post_process_panoptic_segmentation(outputs, threshold=0) + print(len(segmentation)) + print(self.feature_extract_tester.batch_size) self.assertTrue(len(segmentation) == self.feature_extract_tester.batch_size) for el in segmentation: self.assertTrue("segmentation" in el) From 45e14038f20d7f04574af4ce8356bab11e3d6741 Mon Sep 17 00:00:00 2001 From: Arthur <48595927+ArthurZucker@users.noreply.github.com> Date: Wed, 5 Oct 2022 22:28:31 +0200 Subject: [PATCH 476/539] Add WhisperModel to transformers (#19166) * simplify loop * add featur extractor * add model * start conversion * add dropout * initial commit of test files * copnversion for all models * update processor for correct padding * update feature extraction * update integration test logits match * fmnt: off for the logits * on the fly mel bank * small nit * update test * update tokenizer * nit feature extraction * update * update tokenizer test * adds logit processor and update tokenizer to get supress tokens * style * clean convert * revert to original modeling tf utils * Update * update * nit * clean convert file * update tests and nits * quality * slow generation test * ffn_dim to allow customization * update readme * add to toctreee * start fixing integration tests * update tests and code * fix feature extractor * fix config tests common * update code to fix tests * fix feature exctractor * nit feature extraction * update test for new feature extractor * style * add absrtact * large logits wioth custom decoder input ids * wraap around is otrch available * fix feature extractor * correct logits for whisper small.en * nit * fix encoder_attentino_mask * some fixes * remove unnecessary inputs * nits * add normalizer file * update etst tokenization * fix attention mask not defined * Add model to README * Fix doc tests * fix generate * remove uncoder attention mask useless * update test modeling whisper * update condfig to add second non supress tokens * nits on feature exrtactor * nit for test tokenizers * update etsts * update tests * update tokenization test * fixup * invalidated hf token. Clean convert openai to whisper * fix logit tests * fixup * clean merge * revert toc_tree changes * remove useless LogitProcessor * Update whisper .mdx * update config file doc * update configuration docstring * update test tokenization * update test tokenization * update tokenization whisper Added copied from where needed * update feature extraction * nit test name * style * quality * remove get suppress tokens and update non_speech tokens global variables * Update src/transformers/models/whisper/feature_extraction_whisper.py Co-authored-by: Patrick von Platen * clean modeling whisper and test Removed the attention mask arguments that are deprecated * fix large test * Add multilingual audio test, and translate test * style * fix larg multilingual test * nits * Update docs/source/en/model_doc/whisper.mdx Co-authored-by: Patrick von Platen * add copied from for attention layer * remove attention masks in doc * add english normalizer * update tokenization test * remove copied from in whisper attention : no bias in k_proj only * wrap around dependencies in english normalizer * style * correct import generation logits * for now, wrap feature extractor with torch * Update src/transformers/models/whisper/convert_openai_whisper_to_tfms.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/whisper/configuration_whisper.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update docs/source/en/model_doc/whisper.mdx Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * remove torch depencies for feature extraction and style * fixup * nit * update logitds * style * nit * nits and fix final tests * add `is_more_itertools_available` to utils * quality * add begin supress tokens, supress tokens to generate args and config * clean supressTokensLogitProcessor in generation logits * Nit naming * add supressTokensAtBegin * udpate tests, supress tokens to None or correct values * nit and style * update RAG to fit test and generate_logit * add copy pasted statment on english normalizer * add arguments to config_common_kwargs * Update src/transformers/generation_utils.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/generation_logits_process.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/whisper/configuration_whisper.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Apply suggestions from code review Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: Patrick von Platen Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * revert changes based on reviews * update doc and nits * more nits * last nits * update test configuration common * add BART name in decoder attention mask documentation * Update src/transformers/models/whisper/modeling_whisper.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * style * nit * nit * add english.json file to git * nits on documentation * nit * nits * last styling * add main toctree file * remove sentence piece dependency * clean init file * fix tokenizer that has no dependencies on sentencepiece * update whisper init file, nit * remove english.json file * add get decoder prompt id * revert changes and add forced logit processor * nit * clean normalizer * remove protected * update * Update src/transformers/models/whisper/configuration_whisper.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * update based on review * Update src/transformers/models/whisper/configuration_whisper.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * add batched tests Co-authored-by: Patrick von Platen Co-authored-by: NielsRogge Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> --- README.md | 1 + README_ko.md | 1 + README_zh-hans.md | 1 + README_zh-hant.md | 1 + docs/source/en/_toctree.yml | 2 + docs/source/en/index.mdx | 2 + docs/source/en/model_doc/whisper.mdx | 68 + src/transformers/__init__.py | 28 + src/transformers/configuration_utils.py | 2 + src/transformers/generation_logits_process.py | 46 + src/transformers/generation_utils.py | 36 + src/transformers/models/__init__.py | 1 + .../models/auto/configuration_auto.py | 3 + .../models/auto/feature_extraction_auto.py | 1 + src/transformers/models/auto/modeling_auto.py | 3 + .../models/auto/processing_auto.py | 1 + .../models/auto/tokenization_auto.py | 1 + src/transformers/models/whisper/__init__.py | 67 + .../models/whisper/configuration_whisper.py | 216 +++ .../models/whisper/english_normalizer.py | 602 ++++++++ .../whisper/feature_extraction_whisper.py | 305 ++++ .../models/whisper/modeling_whisper.py | 1230 +++++++++++++++++ .../models/whisper/processing_whisper.py | 124 ++ .../models/whisper/tokenization_whisper.py | 401 ++++++ src/transformers/utils/__init__.py | 1 + src/transformers/utils/dummy_pt_objects.py | 24 + src/transformers/utils/import_utils.py | 4 + tests/models/whisper/__init__.py | 0 .../test_feature_extraction_whisper.py | 225 +++ tests/models/whisper/test_modeling_whisper.py | 1042 ++++++++++++++ .../models/whisper/test_processor_whisper.py | 118 ++ .../whisper/test_tokenization_whisper.py | 190 +++ tests/test_configuration_common.py | 2 + utils/check_repo.py | 2 + utils/documentation_tests.txt | 1 + 35 files changed, 4752 insertions(+) create mode 100644 docs/source/en/model_doc/whisper.mdx create mode 100644 src/transformers/models/whisper/__init__.py create mode 100644 src/transformers/models/whisper/configuration_whisper.py create mode 100644 src/transformers/models/whisper/english_normalizer.py create mode 100644 src/transformers/models/whisper/feature_extraction_whisper.py create mode 100644 src/transformers/models/whisper/modeling_whisper.py create mode 100644 src/transformers/models/whisper/processing_whisper.py create mode 100644 src/transformers/models/whisper/tokenization_whisper.py create mode 100644 tests/models/whisper/__init__.py create mode 100644 tests/models/whisper/test_feature_extraction_whisper.py create mode 100644 tests/models/whisper/test_modeling_whisper.py create mode 100644 tests/models/whisper/test_processor_whisper.py create mode 100644 tests/models/whisper/test_tokenization_whisper.py diff --git a/README.md b/README.md index d711e861cdcd64..d10436c9510357 100644 --- a/README.md +++ b/README.md @@ -393,6 +393,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino. 1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli. 1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei. +1. **[Whisper](https://huggingface.co/docs/transformers/main/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever. 1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling. 1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li. 1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau. diff --git a/README_ko.md b/README_ko.md index c591b50417ad9f..f16e459976087a 100644 --- a/README_ko.md +++ b/README_ko.md @@ -343,6 +343,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino. 1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli. 1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei. +1. **[Whisper](https://huggingface.co/docs/transformers/main/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever. 1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling. 1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li. 1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau. diff --git a/README_zh-hans.md b/README_zh-hans.md index 36b33982d0a130..5112456121b51a 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -367,6 +367,7 @@ conda install -c huggingface transformers 1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (来自 Facebook AI) 伴随论文 [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) 由 Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino 发布。 1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (来自 Facebook AI) 伴随论文 [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) 由 Qiantong Xu, Alexei Baevski, Michael Auli 发布。 1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei. +1. **[Whisper](https://huggingface.co/docs/transformers/main/model_doc/whisper)** (来自 OpenAI) 伴随论文 [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) 由 Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever 发布。 1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (来自 Microsoft Research) 伴随论文 [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) 由 Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling 发布。 1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li. 1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (来自 Facebook) 伴随论文 [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) 由 Guillaume Lample and Alexis Conneau 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index eef6a3589f4efd..7f0cd19e1868f2 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -379,6 +379,7 @@ conda install -c huggingface transformers 1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino. 1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli. 1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei. +1. **[Whisper](https://huggingface.co/docs/transformers/main/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever. 1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling. 1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li. 1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau. diff --git a/docs/source/en/_toctree.yml b/docs/source/en/_toctree.yml index 63a148bd3cc941..e2589bce3e0505 100644 --- a/docs/source/en/_toctree.yml +++ b/docs/source/en/_toctree.yml @@ -449,6 +449,8 @@ title: Wav2Vec2Phoneme - local: model_doc/wavlm title: WavLM + - local: model_doc/whisper + title: Whisper - local: model_doc/xls_r title: XLS-R - local: model_doc/xlsr_wav2vec2 diff --git a/docs/source/en/index.mdx b/docs/source/en/index.mdx index 1b862df0b0e4c8..2b6a482aee7534 100644 --- a/docs/source/en/index.mdx +++ b/docs/source/en/index.mdx @@ -183,6 +183,7 @@ The documentation is organized into five sections: 1. **[Wav2Vec2-Conformer](model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino. 1. **[Wav2Vec2Phoneme](model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli. 1. **[WavLM](model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei. +1. **[Whisper](model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever. 1. **[X-CLIP](model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling. 1. **[XGLM](model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li. 1. **[XLM](model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau. @@ -329,6 +330,7 @@ Flax), PyTorch, and/or TensorFlow. | Wav2Vec2 | ✅ | ❌ | ✅ | ✅ | ✅ | | Wav2Vec2-Conformer | ❌ | ❌ | ✅ | ❌ | ❌ | | WavLM | ❌ | ❌ | ✅ | ❌ | ❌ | +| Whisper | ✅ | ❌ | ✅ | ❌ | ❌ | | X-CLIP | ❌ | ❌ | ✅ | ❌ | ❌ | | XGLM | ✅ | ✅ | ✅ | ✅ | ✅ | | XLM | ✅ | ❌ | ✅ | ✅ | ❌ | diff --git a/docs/source/en/model_doc/whisper.mdx b/docs/source/en/model_doc/whisper.mdx new file mode 100644 index 00000000000000..beb7bf3798bcb6 --- /dev/null +++ b/docs/source/en/model_doc/whisper.mdx @@ -0,0 +1,68 @@ + + +# Whisper + +## Overview + +The Whisper model was proposed in [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever. + +The abstract from the paper is the following: + +*We study the capabilities of speech processing systems trained simply to predict large amounts of transcripts of audio on the internet. When scaled to 680,000 hours of multilingual and multitask supervision, the resulting models generalize well to standard benchmarks and are often competitive with prior fully supervised results but in a zeroshot transfer setting without the need for any finetuning. When compared to humans, the models approach their accuracy and robustness. We are releasing models and inference code to serve as a foundation for further work on robust speech processing.* + + +Tips: + +- The model usually performs well without requiring any finetuning. +- The architecture follows a classic encoder-decoder architecture, which means that it relies on the [`~generation_utils.GenerationMixin.generate`] function for inference. +- One can use [`WhisperProcessor`] to prepare audio for the model, and decode the predicted ID's back into text. + +This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ). +The original code can be found [here](https://github.com/openai/whisper). + + +## WhisperConfig + +[[autodoc]] WhisperConfig + +## WhisperTokenizer + +[[autodoc]] WhisperTokenizer + - build_inputs_with_special_tokens + - get_special_tokens_mask + - create_token_type_ids_from_sequences + - save_vocabulary + +## WhisperFeatureExtractor + +[[autodoc]] WhisperFeatureExtractor + - __call__ + +## WhisperProcessor + +[[autodoc]] WhisperProcessor + - __call__ + - from_pretrained + - save_pretrained + - batch_decode + - decode + +## WhisperModel + +[[autodoc]] WhisperModel + - forward + +## WhisperForConditionalGeneration + +[[autodoc]] WhisperForConditionalGeneration + - forward diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 448300774b8261..18bfea30a09e57 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -390,6 +390,13 @@ "WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig", ], + "models.whisper": [ + "WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", + "WhisperConfig", + "WhisperFeatureExtractor", + "WhisperProcessor", + "WhisperTokenizer", + ], "models.x_clip": [ "XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "XCLIPConfig", @@ -1863,6 +1870,14 @@ "Speech2TextPreTrainedModel", ] ) + _import_structure["models.whisper"].extend( + [ + "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST", + "WhisperForConditionalGeneration", + "WhisperModel", + "WhisperPreTrainedModel", + ] + ) _import_structure["models.speech_to_text_2"].extend(["Speech2Text2ForCausalLM", "Speech2Text2PreTrainedModel"]) _import_structure["models.splinter"].extend( [ @@ -3339,6 +3354,13 @@ from .models.wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer from .models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM from .models.wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig + from .models.whisper import ( + WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, + WhisperConfig, + WhisperFeatureExtractor, + WhisperProcessor, + WhisperTokenizer, + ) from .models.x_clip import ( XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, XCLIPConfig, @@ -4724,6 +4746,12 @@ WavLMModel, WavLMPreTrainedModel, ) + from .models.whisper import ( + WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, + WhisperForConditionalGeneration, + WhisperModel, + WhisperPreTrainedModel, + ) from .models.x_clip import ( XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, XCLIPModel, diff --git a/src/transformers/configuration_utils.py b/src/transformers/configuration_utils.py index 3fdc0f265f6331..a77d93b8b18016 100755 --- a/src/transformers/configuration_utils.py +++ b/src/transformers/configuration_utils.py @@ -299,6 +299,8 @@ def __init__(self, **kwargs): self.forced_eos_token_id = kwargs.pop("forced_eos_token_id", None) self.remove_invalid_values = kwargs.pop("remove_invalid_values", False) self.exponential_decay_length_penalty = kwargs.pop("exponential_decay_length_penalty", None) + self.suppress_tokens = kwargs.pop("suppress_tokens", None) + self.begin_suppress_tokens = kwargs.pop("begin_suppress_tokens", None) # Fine-tuning task arguments self.architectures = kwargs.pop("architectures", None) diff --git a/src/transformers/generation_logits_process.py b/src/transformers/generation_logits_process.py index 0e914940546c3b..0e1414aa08bb0b 100644 --- a/src/transformers/generation_logits_process.py +++ b/src/transformers/generation_logits_process.py @@ -702,3 +702,49 @@ class LogitNormalization(LogitsProcessor, LogitsWarper): def __call__(self, input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: scores = scores.log_softmax(dim=-1) return scores + + +class SuppressTokensAtBeginLogitsProcessor(LogitsProcessor): + r""" + [`SuppressTokensAtBeginLogitsProcessor`] supresses a list of tokens as soon as the `generate` function starts + generating using `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` at not + sampled at the begining of the generation. + """ + + def __init__(self, begin_suppress_tokens, begin_index): + self.begin_suppress_tokens = list(begin_suppress_tokens) + self.begin_index = begin_index + + def __call__(self, input_ids, scores): + if input_ids.shape[1] == self.begin_index: + scores[:, self.begin_suppress_tokens] = -float("inf") + + return scores + + +class SuppressTokensLogitsProcessor(LogitsProcessor): + r"""This processor can be used to suppress a list of tokens. The processor will set their log probs to `-inf` so that they + are not sampled.""" + + def __init__(self, suppress_tokens): + self.suppress_tokens = list(suppress_tokens) + + def __call__(self, input_ids, scores): + scores[:, self.suppress_tokens] = -float("inf") + return scores + + +class ForceTokensLogitsProcessor(LogitsProcessor): + r"""This processor can be used to force a list of tokens. The processor will set their log probs to `inf` so that they + are sampled at their corresponding index.""" + + def __init__(self, force_token_map): + self.force_token_map = dict(force_token_map) + + def __call__(self, input_ids, scores): + generation_idx = input_ids.shape[-1] + current_token = self.force_token_map.get(generation_idx, None) + if current_token is not None: + scores[:, :] = -float("inf") + scores[:, current_token] = 0 + return scores diff --git a/src/transformers/generation_utils.py b/src/transformers/generation_utils.py index 79460c1cad25c7..380eec07270c9c 100644 --- a/src/transformers/generation_utils.py +++ b/src/transformers/generation_utils.py @@ -30,6 +30,7 @@ ExponentialDecayLengthPenalty, ForcedBOSTokenLogitsProcessor, ForcedEOSTokenLogitsProcessor, + ForceTokensLogitsProcessor, HammingDiversityLogitsProcessor, InfNanRemoveLogitsProcessor, LogitNormalization, @@ -39,6 +40,8 @@ NoRepeatNGramLogitsProcessor, PrefixConstrainedLogitsProcessor, RepetitionPenaltyLogitsProcessor, + SuppressTokensAtBeginLogitsProcessor, + SuppressTokensLogitsProcessor, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, @@ -691,6 +694,9 @@ def _get_logits_processor( exponential_decay_length_penalty: Tuple, logits_processor: Optional[LogitsProcessorList], renormalize_logits: Optional[bool], + suppress_tokens: Optional[List[int]] = None, + begin_suppress_tokens: Optional[List[int]] = None, + forced_decoder_ids: Optional[List[int]] = None, ) -> LogitsProcessorList: """ This class returns a [`LogitsProcessorList`] list object that contains all relevant [`LogitsProcessor`] @@ -725,6 +731,12 @@ def _get_logits_processor( if exponential_decay_length_penalty is not None else self.config.exponential_decay_length_penalty ) + suppress_tokens = suppress_tokens if suppress_tokens is not None else self.config.suppress_tokens + begin_suppress_tokens = ( + begin_suppress_tokens if begin_suppress_tokens is not None else self.config.begin_suppress_tokens + ) + if forced_decoder_ids is None and hasattr(self.config, "forced_decoder_ids"): + forced_decoder_ids = self.config.forced_decoder_ids # instantiate processors list # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files @@ -762,6 +774,16 @@ def _get_logits_processor( processors.append( ExponentialDecayLengthPenalty(exponential_decay_length_penalty, eos_token_id, input_ids_seq_length) ) + if suppress_tokens is not None: + processors.append(SuppressTokensLogitsProcessor(suppress_tokens)) + if begin_suppress_tokens is not None: + begin_index = input_ids_seq_length + begin_index = begin_index if (input_ids_seq_length > 1 or forced_bos_token_id is None) else begin_index + 1 + if forced_decoder_ids is not None: + begin_index += forced_decoder_ids[-1][0] # generation starts after the last token that is forced + processors.append(SuppressTokensAtBeginLogitsProcessor(begin_suppress_tokens, begin_index)) + if forced_decoder_ids is not None: + processors.append(ForceTokensLogitsProcessor(forced_decoder_ids)) processors = self._merge_criteria_processor_list(processors, logits_processor) # `LogitNormalization` should always be the last logit processor, when present if renormalize_logits is True: @@ -932,6 +954,9 @@ def generate( remove_invalid_values: Optional[bool] = None, synced_gpus: Optional[bool] = False, exponential_decay_length_penalty: Optional[Tuple[Union[int, float]]] = None, + suppress_tokens: Optional[List[int]] = None, + begin_suppress_tokens: Optional[List[int]] = None, + forced_decoder_ids: Optional[List[int]] = None, **model_kwargs, ) -> Union[GreedySearchOutput, SampleOutput, BeamSearchOutput, BeamSampleOutput, torch.LongTensor]: r""" @@ -1090,6 +1115,14 @@ def generate( This Tuple adds an exponentially increasing length penalty, after a certain amount of tokens have been generated. The tuple shall consist of: `(start_index, decay_factor)` where `start_index` indicates where penalty starts and `decay_factor` represents the factor of exponential decay + suppress_tokens (`List[int]`, *optional*, defaults to `model.config.suppress_tokens`): + A list of tokens that will be supressed at generation. The `SupressTokens` logit processor will set + their log probs to `-inf` so that they are not sampled. + begin_suppress_tokens (`List[int]`, *optional*, defaults to `model.config.begin_suppress_tokens`): + A list of tokens that will be supressed at the begining of the generation. The `SupressBeginTokens` + logit processor will set their log probs to `-inf` so that they are not sampled. + forced_decoder_ids (`List[int]`, *optional*, defaults to `model.config.forced_decoder_ids`): + A list of tokens that will be forced as beginning tokens, before sampling. model_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. If the model @@ -1337,6 +1370,9 @@ def generate( exponential_decay_length_penalty=exponential_decay_length_penalty, logits_processor=logits_processor, renormalize_logits=renormalize_logits, + suppress_tokens=suppress_tokens, + begin_suppress_tokens=begin_suppress_tokens, + forced_decoder_ids=forced_decoder_ids, ) # 8. prepare stopping criteria diff --git a/src/transformers/models/__init__.py b/src/transformers/models/__init__.py index f7979e0a77b12b..0f363b22c6800d 100644 --- a/src/transformers/models/__init__.py +++ b/src/transformers/models/__init__.py @@ -159,6 +159,7 @@ wav2vec2_phoneme, wav2vec2_with_lm, wavlm, + whisper, x_clip, xglm, xlm, diff --git a/src/transformers/models/auto/configuration_auto.py b/src/transformers/models/auto/configuration_auto.py index 14fa334b57978a..a67371a1d490bb 100644 --- a/src/transformers/models/auto/configuration_auto.py +++ b/src/transformers/models/auto/configuration_auto.py @@ -152,6 +152,7 @@ ("wav2vec2", "Wav2Vec2Config"), ("wav2vec2-conformer", "Wav2Vec2ConformerConfig"), ("wavlm", "WavLMConfig"), + ("whisper", "WhisperConfig"), ("xclip", "XCLIPConfig"), ("xglm", "XGLMConfig"), ("xlm", "XLMConfig"), @@ -276,6 +277,7 @@ ("vit_msn", "VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("wav2vec2", "WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("wav2vec2-conformer", "WAV2VEC2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"), + ("whisper", "WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("xclip", "X_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("xglm", "XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP"), ("xlm", "XLM_PRETRAINED_CONFIG_ARCHIVE_MAP"), @@ -434,6 +436,7 @@ ("wav2vec2-conformer", "Wav2Vec2-Conformer"), ("wav2vec2_phoneme", "Wav2Vec2Phoneme"), ("wavlm", "WavLM"), + ("whisper", "Whisper"), ("xclip", "X-CLIP"), ("xglm", "XGLM"), ("xlm", "XLM"), diff --git a/src/transformers/models/auto/feature_extraction_auto.py b/src/transformers/models/auto/feature_extraction_auto.py index 73fe1ad42ad195..586498ee81c5a6 100644 --- a/src/transformers/models/auto/feature_extraction_auto.py +++ b/src/transformers/models/auto/feature_extraction_auto.py @@ -77,6 +77,7 @@ ("vit_msn", "ViTFeatureExtractor"), ("wav2vec2", "Wav2Vec2FeatureExtractor"), ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"), + ("whisper", "WhisperFeatureExtractor"), ("xclip", "CLIPFeatureExtractor"), ("yolos", "YolosFeatureExtractor"), ] diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index 5cac7e7bda6fcd..4cf9b58a517bfa 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -146,6 +146,7 @@ ("wav2vec2", "Wav2Vec2Model"), ("wav2vec2-conformer", "Wav2Vec2ConformerModel"), ("wavlm", "WavLMModel"), + ("whisper", "WhisperModel"), ("xclip", "XCLIPModel"), ("xglm", "XGLMModel"), ("xlm", "XLMModel"), @@ -273,6 +274,7 @@ ("tapas", "TapasForMaskedLM"), ("transfo-xl", "TransfoXLLMHeadModel"), ("wav2vec2", "Wav2Vec2ForMaskedLM"), + ("whisper", "WhisperForConditionalGeneration"), ("xlm", "XLMWithLMHeadModel"), ("xlm-roberta", "XLMRobertaForMaskedLM"), ("xlm-roberta-xl", "XLMRobertaXLForMaskedLM"), @@ -500,6 +502,7 @@ [ ("speech-encoder-decoder", "SpeechEncoderDecoderModel"), ("speech_to_text", "Speech2TextForConditionalGeneration"), + ("whisper", "WhisperForConditionalGeneration"), ] ) diff --git a/src/transformers/models/auto/processing_auto.py b/src/transformers/models/auto/processing_auto.py index 9885cae95e88cb..8281d6a3bd227d 100644 --- a/src/transformers/models/auto/processing_auto.py +++ b/src/transformers/models/auto/processing_auto.py @@ -61,6 +61,7 @@ ("wav2vec2-conformer", "Wav2Vec2Processor"), ("wav2vec2_with_lm", "Wav2Vec2ProcessorWithLM"), ("wavlm", "Wav2Vec2Processor"), + ("whisper", "WhisperProcessor"), ("xclip", "CLIPProcessor"), ] ) diff --git a/src/transformers/models/auto/tokenization_auto.py b/src/transformers/models/auto/tokenization_auto.py index 97e048885e1800..43fb6ce352a3f1 100644 --- a/src/transformers/models/auto/tokenization_auto.py +++ b/src/transformers/models/auto/tokenization_auto.py @@ -255,6 +255,7 @@ ("wav2vec2", ("Wav2Vec2CTCTokenizer", None)), ("wav2vec2-conformer", ("Wav2Vec2CTCTokenizer", None)), ("wav2vec2_phoneme", ("Wav2Vec2PhonemeCTCTokenizer", None)), + ("whisper", ("WhisperTokenizer" if is_sentencepiece_available() else None, None)), ("xclip", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)), ( "xglm", diff --git a/src/transformers/models/whisper/__init__.py b/src/transformers/models/whisper/__init__.py new file mode 100644 index 00000000000000..ea7259cf69c411 --- /dev/null +++ b/src/transformers/models/whisper/__init__.py @@ -0,0 +1,67 @@ +# flake8: noqa +# There's no way to ignore "F401 '...' imported but unused" warnings in this +# module, but to preserve other warnings. So, don't check this module at all. + +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available + + +_import_structure = { + "configuration_whisper": ["WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP", "WhisperConfig"], + "feature_extraction_whisper": ["WhisperFeatureExtractor"], + "processing_whisper": ["WhisperProcessor"], + "tokenization_whisper": ["WhisperTokenizer"], +} + + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_whisper"] = [ + "WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST", + "WhisperForConditionalGeneration", + "WhisperModel", + "WhisperPreTrainedModel", + ] + + +if TYPE_CHECKING: + from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig + from .feature_extraction_whisper import WhisperFeatureExtractor + from .processing_whisper import WhisperProcessor + from .tokenization_whisper import WhisperTokenizer + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_whisper import ( + WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, + WhisperForConditionalGeneration, + WhisperModel, + WhisperPreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/src/transformers/models/whisper/configuration_whisper.py b/src/transformers/models/whisper/configuration_whisper.py new file mode 100644 index 00000000000000..9d35eb06fd500a --- /dev/null +++ b/src/transformers/models/whisper/configuration_whisper.py @@ -0,0 +1,216 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Whisper model configuration""" + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + +WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = { + "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json", +} + +# fmt: off +NON_SPEECH_TOKENS = [ + 1, 2, 6, 7, 8, 9, 10, 12, 14, 25, + 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, + 63, 90, 91, 92, 93, 357, 366, 438, 532, 685, + 705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377, + 1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211, + 4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786, + 11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791, + 17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409, + 34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361 +] +NON_SPEECH_TOKENS_MULTI = [ + 1, 2, 6, 7, 8, 9, 10, 12, 14, 25, + 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, + 63, 90, 91, 92, 93, 359, 503, 522, 542, 873, + 893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627, + 3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647, + 7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793, + 14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675, + 22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865, + 42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362 +] +# fmt: on + + +class WhisperConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`WhisperModel`]. It is used to instantiate a + Whisper model according to the specified arguments, defining the model architecture. Instantiating a configuration + with the defaults will yield a similar configuration to that of the Whisper + [openai/whisper-tiny](https://huggingface.co/openai/whisper-tiny) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 51865): + Vocabulary size of the Whisper model. Defines the number of different tokens that can be represented by the + `decoder_input_ids` passed when calling [`WhisperModel`] + num_mel_bins (`int`, *optional*, defaults to 80): + Number of mel features used per input features. Should correspond to the value used in the + `WhisperProcessor` class. + encoder_layers (`int`, *optional*, defaults to 6): + Number of encoder layers. + decoder_layers (`int`, *optional*, defaults to 6): + Number of decoder layers. + encoder_attention_heads (`int`, *optional*, defaults to 4): + Number of attention heads for each attention layer in the Transformer encoder. + decoder_attention_heads (`int`, *optional*, defaults to 4): + Number of attention heads for each attention layer in the Transformer decoder. + encoder_ffn_dim (`int`, *optional*, defaults to 1536): + Dimensionality of the "intermediate" (often named feed-forward) layer in encoder. + decoder_ffn_dim (`int`, *optional*, defaults to 1536): + Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. + encoder_layerdrop (`float`, *optional*, defaults to 0.0): + The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) + for more details. + decoder_layerdrop (`float`, *optional*, defaults to 0.0): + The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) + for more details. + decoder_start_token_id (`int`, *optional*, defaults to 50257): + Corresponds to the "<|startoftranscript|>" token, which is automatically used when no `decoder_input_ids` + are provided to the `generate` function. It is used to guide the model`s generation process depending on + the task. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). + is_encoder_decoder (`bool`, *optional*, defaults to `True`): + Whether the model is used as an encoder/decoder or not. + activation_function (`str`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + d_model (`int`, *optional*, defaults to 256): + Dimensionality of the layers. + dropout (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + activation_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for activations inside the fully connected layer. + init_std (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + scale_embedding (`bool`, *optional*, defaults to False): + Scale embeddings by diving by sqrt(d_model). + max_source_positions (`int`, *optional*, defaults to 1500): + The maximum sequence length of log-mel filter-bank features that this model might ever be used with. + max_target_positions (`int`, *optional*, defaults to 448): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + pad_token_id (`int`, *optional*, defaults to 50256): + Padding token id. + bos_token_id (`int`, *optional*, defaults to 50256): + Begin of stream token id. + eos_token_id (`int`, *optional*, defaults to 50257): + End of stream token id. + tie_word_embeddings (`bool`, *optional*, defaults to `True`): + Whether to tie input and output embeddings. + suppress_tokens (`List[int]`, *optional*): + A list containing the non-speech tokens that will be used by the logit processor in the `generate` + function. NON_SPEECH_TOKENS and NON_SPEECH_TOKENS_MULTI each correspond to the `english-only` and the + `multilingual` model. + begin_suppress_tokens (`List[int]`, *optional*, defaults to `[220,50256]`): + A list containing tokens that will be supressed at the beginning of the sampling process. Initialized as + the token for `" "` (`blank_token_id`) and the `eos_token_id` + + + Example: + + ```python + >>> from transformers import WhisperModel, WhisperConfig + + >>> # Initializing a Whisper tiny style configuration + >>> configuration = WhisperConfig() + + >>> # Initializing a model from the tiny style configuration + >>> model = WhisperModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + model_type = "whisper" + keys_to_ignore_at_inference = ["past_key_values"] + attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} + + def __init__( + self, + vocab_size=51865, + num_mel_bins=80, + encoder_layers=6, + encoder_attention_heads=4, + decoder_layers=6, + decoder_attention_heads=4, + decoder_ffn_dim=1536, + encoder_ffn_dim=1536, + encoder_layerdrop=0.0, + decoder_layerdrop=0.0, + decoder_start_token_id=50257, + use_cache=True, + is_encoder_decoder=True, + activation_function="gelu", + d_model=256, + dropout=0.0, + attention_dropout=0.0, + activation_dropout=0.0, + init_std=0.02, + scale_embedding=False, + max_source_positions=1500, + max_target_positions=448, + pad_token_id=50256, + bos_token_id=50257, + eos_token_id=50256, + tie_word_embeddings=True, + suppress_tokens=None, + begin_suppress_tokens=[220, 50256], + **kwargs + ): + self.vocab_size = vocab_size + self.num_mel_bins = num_mel_bins + self.d_model = d_model + self.encoder_layers = encoder_layers + self.encoder_attention_heads = encoder_attention_heads + self.decoder_layers = decoder_layers + self.decoder_attention_heads = decoder_attention_heads + self.decoder_ffn_dim = decoder_ffn_dim + self.encoder_ffn_dim = encoder_ffn_dim + self.dropout = dropout + self.attention_dropout = attention_dropout + self.activation_dropout = activation_dropout + self.activation_function = activation_function + self.init_std = init_std + self.encoder_layerdrop = encoder_layerdrop + self.decoder_layerdrop = decoder_layerdrop + self.use_cache = use_cache + self.num_hidden_layers = encoder_layers + self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True + self.tie_word_embeddings = tie_word_embeddings + self.max_source_positions = max_source_positions + self.max_target_positions = max_target_positions + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + is_encoder_decoder=is_encoder_decoder, + decoder_start_token_id=decoder_start_token_id, + tie_word_embeddings=tie_word_embeddings, + suppress_tokens=suppress_tokens, + begin_suppress_tokens=begin_suppress_tokens, + **kwargs, + ) diff --git a/src/transformers/models/whisper/english_normalizer.py b/src/transformers/models/whisper/english_normalizer.py new file mode 100644 index 00000000000000..11912bcc55b72f --- /dev/null +++ b/src/transformers/models/whisper/english_normalizer.py @@ -0,0 +1,602 @@ +# Copyright 2022 The OpenAI team and The HuggingFace Team. All rights reserved. +# Most of the code is copy pasted from the original whisper repository +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re +from fractions import Fraction +from typing import Iterator, List, Match, Optional, Union + +from ...utils import is_more_itertools_available + + +if is_more_itertools_available(): + from more_itertools import windowed + +import unicodedata + +import regex + + +# non-ASCII letters that are not separated by "NFKD" normalization +ADDITIONAL_DIACRITICS = { + "œ": "oe", + "Œ": "OE", + "ø": "o", + "Ø": "O", + "æ": "ae", + "Æ": "AE", + "ß": "ss", + "ẞ": "SS", + "đ": "d", + "Đ": "D", + "ð": "d", + "Ð": "D", + "þ": "th", + "Þ": "th", + "ł": "l", + "Ł": "L", +} + + +def remove_symbols_and_diacritics(s: str, keep=""): + """ + Replace any other markers, symbols, and punctuations with a space, and drop any diacritics (category 'Mn' and some + manual mappings) + """ + + def replace_character(char): + if char in keep: + return char + elif char in ADDITIONAL_DIACRITICS: + return ADDITIONAL_DIACRITICS[char] + + elif unicodedata.category(char) == "Mn": + return "" + + elif unicodedata.category(char)[0] in "MSP": + return " " + + return char + + return "".join(replace_character(c) for c in unicodedata.normalize("NFKD", s)) + + +def remove_symbols(s: str): + """ + Replace any other markers, symbols, punctuations with a space, keeping diacritics + """ + return "".join(" " if unicodedata.category(c)[0] in "MSP" else c for c in unicodedata.normalize("NFKC", s)) + + +class BasicTextNormalizer: + def __init__(self, remove_diacritics: bool = False, split_letters: bool = False): + self.clean = remove_symbols_and_diacritics if remove_diacritics else remove_symbols + self.split_letters = split_letters + + def __call__(self, s: str): + s = s.lower() + s = re.sub(r"[<\[][^>\]]*[>\]]", "", s) # remove words between brackets + s = re.sub(r"\(([^)]+?)\)", "", s) # remove words between parenthesis + s = self.clean(s).lower() + + if self.split_letters: + s = " ".join(regex.findall(r"\X", s, regex.U)) + + s = re.sub(r"\s+", " ", s) # replace any successive whitespace characters with a space + + return s + + +class EnglishNumberNormalizer: + """ + Convert any spelled-out numbers into arabic numbers, while handling: + + - remove any commas + - keep the suffixes such as: `1960s`, `274th`, `32nd`, etc. + - spell out currency symbols after the number. e.g. `$20 million` -> `20000000 dollars` + - spell out `one` and `ones` + - interpret successive single-digit numbers as nominal: `one oh one` -> `101` + """ + + def __init__(self): + super().__init__() + + self.zeros = {"o", "oh", "zero"} + # fmt: off + self.ones = { + name: i + for i, name in enumerate( + ["one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen", "sixteen", "seventeen", "eighteen", "nineteen"], + start=1, + ) + } + # fmt: on + self.ones_plural = { + "sixes" if name == "six" else name + "s": (value, "s") for name, value in self.ones.items() + } + self.ones_ordinal = { + "zeroth": (0, "th"), + "first": (1, "st"), + "second": (2, "nd"), + "third": (3, "rd"), + "fifth": (5, "th"), + "twelfth": (12, "th"), + **{ + name + ("h" if name.endswith("t") else "th"): (value, "th") + for name, value in self.ones.items() + if value > 3 and value != 5 and value != 12 + }, + } + self.ones_suffixed = {**self.ones_plural, **self.ones_ordinal} + + self.tens = { + "twenty": 20, + "thirty": 30, + "forty": 40, + "fifty": 50, + "sixty": 60, + "seventy": 70, + "eighty": 80, + "ninety": 90, + } + self.tens_plural = {name.replace("y", "ies"): (value, "s") for name, value in self.tens.items()} + self.tens_ordinal = {name.replace("y", "ieth"): (value, "th") for name, value in self.tens.items()} + self.tens_suffixed = {**self.tens_plural, **self.tens_ordinal} + + self.multipliers = { + "hundred": 100, + "thousand": 1_000, + "million": 1_000_000, + "billion": 1_000_000_000, + "trillion": 1_000_000_000_000, + "quadrillion": 1_000_000_000_000_000, + "quintillion": 1_000_000_000_000_000_000, + "sextillion": 1_000_000_000_000_000_000_000, + "septillion": 1_000_000_000_000_000_000_000_000, + "octillion": 1_000_000_000_000_000_000_000_000_000, + "nonillion": 1_000_000_000_000_000_000_000_000_000_000, + "decillion": 1_000_000_000_000_000_000_000_000_000_000_000, + } + self.multipliers_plural = {name + "s": (value, "s") for name, value in self.multipliers.items()} + self.multipliers_ordinal = {name + "th": (value, "th") for name, value in self.multipliers.items()} + self.multipliers_suffixed = {**self.multipliers_plural, **self.multipliers_ordinal} + self.decimals = {*self.ones, *self.tens, *self.zeros} + + self.preceding_prefixers = { + "minus": "-", + "negative": "-", + "plus": "+", + "positive": "+", + } + self.following_prefixers = { + "pound": "£", + "pounds": "£", + "euro": "€", + "euros": "€", + "dollar": "$", + "dollars": "$", + "cent": "¢", + "cents": "¢", + } + self.prefixes = set(list(self.preceding_prefixers.values()) + list(self.following_prefixers.values())) + self.suffixers = { + "per": {"cent": "%"}, + "percent": "%", + } + self.specials = {"and", "double", "triple", "point"} + + self.words = set( + [ + key + for mapping in [ + self.zeros, + self.ones, + self.ones_suffixed, + self.tens, + self.tens_suffixed, + self.multipliers, + self.multipliers_suffixed, + self.preceding_prefixers, + self.following_prefixers, + self.suffixers, + self.specials, + ] + for key in mapping + ] + ) + self.literal_words = {"one", "ones"} + + def process_words(self, words: List[str]) -> Iterator[str]: + prefix: Optional[str] = None + value: Optional[Union[str, int]] = None + skip = False + + def to_fraction(s: str): + try: + return Fraction(s) + except ValueError: + return None + + def output(result: Union[str, int]): + nonlocal prefix, value + result = str(result) + if prefix is not None: + result = prefix + result + value = None + prefix = None + return result + + if len(words) == 0: + return + + for prev, current, next in windowed([None] + words + [None], 3): + if skip: + skip = False + continue + + next_is_numeric = next is not None and re.match(r"^\d+(\.\d+)?$", next) + has_prefix = current[0] in self.prefixes + current_without_prefix = current[1:] if has_prefix else current + if re.match(r"^\d+(\.\d+)?$", current_without_prefix): + # arabic numbers (potentially with signs and fractions) + f = to_fraction(current_without_prefix) + if f is None: + raise ValueError("Converting the fraction failed") + + if value is not None: + if isinstance(value, str) and value.endswith("."): + # concatenate decimals / ip address components + value = str(value) + str(current) + continue + else: + yield output(value) + + prefix = current[0] if has_prefix else prefix + if f.denominator == 1: + value = f.numerator # store integers as int + else: + value = current_without_prefix + elif current not in self.words: + # non-numeric words + if value is not None: + yield output(value) + yield output(current) + elif current in self.zeros: + value = str(value or "") + "0" + elif current in self.ones: + ones = self.ones[current] + + if value is None: + value = ones + elif isinstance(value, str) or prev in self.ones: + if prev in self.tens and ones < 10: # replace the last zero with the digit + value = value[:-1] + str(ones) + else: + value = str(value) + str(ones) + elif ones < 10: + if value % 10 == 0: + value += ones + else: + value = str(value) + str(ones) + else: # eleven to nineteen + if value % 100 == 0: + value += ones + else: + value = str(value) + str(ones) + elif current in self.ones_suffixed: + # ordinal or cardinal; yield the number right away + ones, suffix = self.ones_suffixed[current] + if value is None: + yield output(str(ones) + suffix) + elif isinstance(value, str) or prev in self.ones: + if prev in self.tens and ones < 10: + yield output(value[:-1] + str(ones) + suffix) + else: + yield output(str(value) + str(ones) + suffix) + elif ones < 10: + if value % 10 == 0: + yield output(str(value + ones) + suffix) + else: + yield output(str(value) + str(ones) + suffix) + else: # eleven to nineteen + if value % 100 == 0: + yield output(str(value + ones) + suffix) + else: + yield output(str(value) + str(ones) + suffix) + value = None + elif current in self.tens: + tens = self.tens[current] + if value is None: + value = tens + elif isinstance(value, str): + value = str(value) + str(tens) + else: + if value % 100 == 0: + value += tens + else: + value = str(value) + str(tens) + elif current in self.tens_suffixed: + # ordinal or cardinal; yield the number right away + tens, suffix = self.tens_suffixed[current] + if value is None: + yield output(str(tens) + suffix) + elif isinstance(value, str): + yield output(str(value) + str(tens) + suffix) + else: + if value % 100 == 0: + yield output(str(value + tens) + suffix) + else: + yield output(str(value) + str(tens) + suffix) + elif current in self.multipliers: + multiplier = self.multipliers[current] + if value is None: + value = multiplier + elif isinstance(value, str) or value == 0: + f = to_fraction(value) + p = f * multiplier if f is not None else None + if f is not None and p.denominator == 1: + value = p.numerator + else: + yield output(value) + value = multiplier + else: + before = value // 1000 * 1000 + residual = value % 1000 + value = before + residual * multiplier + elif current in self.multipliers_suffixed: + multiplier, suffix = self.multipliers_suffixed[current] + if value is None: + yield output(str(multiplier) + suffix) + elif isinstance(value, str): + f = to_fraction(value) + p = f * multiplier if f is not None else None + if f is not None and p.denominator == 1: + yield output(str(p.numerator) + suffix) + else: + yield output(value) + yield output(str(multiplier) + suffix) + else: # int + before = value // 1000 * 1000 + residual = value % 1000 + value = before + residual * multiplier + yield output(str(value) + suffix) + value = None + elif current in self.preceding_prefixers: + # apply prefix (positive, minus, etc.) if it precedes a number + if value is not None: + yield output(value) + + if next in self.words or next_is_numeric: + prefix = self.preceding_prefixers[current] + else: + yield output(current) + elif current in self.following_prefixers: + # apply prefix (dollars, cents, etc.) only after a number + if value is not None: + prefix = self.following_prefixers[current] + yield output(value) + else: + yield output(current) + elif current in self.suffixers: + # apply suffix symbols (percent -> '%') + if value is not None: + suffix = self.suffixers[current] + if isinstance(suffix, dict): + if next in suffix: + yield output(str(value) + suffix[next]) + skip = True + else: + yield output(value) + yield output(current) + else: + yield output(str(value) + suffix) + else: + yield output(current) + elif current in self.specials: + if next not in self.words and not next_is_numeric: + # apply special handling only if the next word can be numeric + if value is not None: + yield output(value) + yield output(current) + elif current == "and": + # ignore "and" after hundreds, thousands, etc. + if prev not in self.multipliers: + if value is not None: + yield output(value) + yield output(current) + elif current == "double" or current == "triple": + if next in self.ones or next in self.zeros: + repeats = 2 if current == "double" else 3 + ones = self.ones.get(next, 0) + value = str(value or "") + str(ones) * repeats + skip = True + else: + if value is not None: + yield output(value) + yield output(current) + elif current == "point": + if next in self.decimals or next_is_numeric: + value = str(value or "") + "." + else: + # should all have been covered at this point + raise ValueError(f"Unexpected token: {current}") + else: + # all should have been covered at this point + raise ValueError(f"Unexpected token: {current}") + + if value is not None: + yield output(value) + + def preprocess(self, s: str): + # replace " and a half" with " point five" + results = [] + + segments = re.split(r"\band\s+a\s+half\b", s) + for i, segment in enumerate(segments): + if len(segment.strip()) == 0: + continue + if i == len(segments) - 1: + results.append(segment) + else: + results.append(segment) + last_word = segment.rsplit(maxsplit=2)[-1] + if last_word in self.decimals or last_word in self.multipliers: + results.append("point five") + else: + results.append("and a half") + + s = " ".join(results) + + # put a space at number/letter boundary + s = re.sub(r"([a-z])([0-9])", r"\1 \2", s) + s = re.sub(r"([0-9])([a-z])", r"\1 \2", s) + + # but remove spaces which could be a suffix + s = re.sub(r"([0-9])\s+(st|nd|rd|th|s)\b", r"\1\2", s) + + return s + + def postprocess(self, s: str): + def combine_cents(m: Match): + try: + currency = m.group(1) + integer = m.group(2) + cents = int(m.group(3)) + return f"{currency}{integer}.{cents:02d}" + except ValueError: + return m.string + + def extract_cents(m: Match): + try: + return f"¢{int(m.group(1))}" + except ValueError: + return m.string + + # apply currency postprocessing; "$2 and ¢7" -> "$2.07" + s = re.sub(r"([€£$])([0-9]+) (?:and )?¢([0-9]{1,2})\b", combine_cents, s) + s = re.sub(r"[€£$]0.([0-9]{1,2})\b", extract_cents, s) + + # write "one(s)" instead of "1(s)", just for the readability + s = re.sub(r"\b1(s?)\b", r"one\1", s) + + return s + + def __call__(self, s: str): + s = self.preprocess(s) + s = " ".join(word for word in self.process_words(s.split()) if word is not None) + s = self.postprocess(s) + + return s + + +class EnglishSpellingNormalizer: + """ + Applies British-American spelling mappings as listed in [1]. + + [1] https://www.tysto.com/uk-us-spelling-list.html + """ + + def __init__(self, english_spelling_mapping): + self.mapping = english_spelling_mapping + + def __call__(self, s: str): + return " ".join(self.mapping.get(word, word) for word in s.split()) + + +class EnglishTextNormalizer: + def __init__(self, english_spelling_mapping): + self.ignore_patterns = r"\b(hmm|mm|mhm|mmm|uh|um)\b" + self.replacers = { + # common contractions + r"\bwon't\b": "will not", + r"\bcan't\b": "can not", + r"\blet's\b": "let us", + r"\bain't\b": "aint", + r"\by'all\b": "you all", + r"\bwanna\b": "want to", + r"\bgotta\b": "got to", + r"\bgonna\b": "going to", + r"\bi'ma\b": "i am going to", + r"\bimma\b": "i am going to", + r"\bwoulda\b": "would have", + r"\bcoulda\b": "could have", + r"\bshoulda\b": "should have", + r"\bma'am\b": "madam", + # contractions in titles/prefixes + r"\bmr\b": "mister ", + r"\bmrs\b": "missus ", + r"\bst\b": "saint ", + r"\bdr\b": "doctor ", + r"\bprof\b": "professor ", + r"\bcapt\b": "captain ", + r"\bgov\b": "governor ", + r"\bald\b": "alderman ", + r"\bgen\b": "general ", + r"\bsen\b": "senator ", + r"\brep\b": "representative ", + r"\bpres\b": "president ", + r"\brev\b": "reverend ", + r"\bhon\b": "honorable ", + r"\basst\b": "assistant ", + r"\bassoc\b": "associate ", + r"\blt\b": "lieutenant ", + r"\bcol\b": "colonel ", + r"\bjr\b": "junior ", + r"\bsr\b": "senior ", + r"\besq\b": "esquire ", + # prefect tenses, ideally it should be any past participles, but it's harder.. + r"'d been\b": " had been", + r"'s been\b": " has been", + r"'d gone\b": " had gone", + r"'s gone\b": " has gone", + r"'d done\b": " had done", # "'s done" is ambiguous + r"'s got\b": " has got", + # general contractions + r"n't\b": " not", + r"'re\b": " are", + r"'s\b": " is", + r"'d\b": " would", + r"'ll\b": " will", + r"'t\b": " not", + r"'ve\b": " have", + r"'m\b": " am", + } + self.standardize_numbers = EnglishNumberNormalizer() + self.standardize_spellings = EnglishSpellingNormalizer(english_spelling_mapping) + + def __call__(self, s: str): + s = s.lower() + + s = re.sub(r"[<\[][^>\]]*[>\]]", "", s) # remove words between brackets + s = re.sub(r"\(([^)]+?)\)", "", s) # remove words between parenthesis + s = re.sub(self.ignore_patterns, "", s) + s = re.sub(r"\s+'", "'", s) # standardize when there's a space before an apostrophe + + for pattern, replacement in self.replacers.items(): + s = re.sub(pattern, replacement, s) + + s = re.sub(r"(\d),(\d)", r"\1\2", s) # remove commas between digits + s = re.sub(r"\.([^0-9]|$)", r" \1", s) # remove periods not followed by numbers + s = remove_symbols_and_diacritics(s, keep=".%$¢€£") # keep some symbols for numerics + + s = self.standardize_numbers(s) + s = self.standardize_spellings(s) + + # now remove prefix/suffix symbols that are not preceded/followed by numbers + s = re.sub(r"[.$¢€£]([^0-9])", r" \1", s) + s = re.sub(r"([^0-9])%", r"\1 ", s) + + s = re.sub(r"\s+", " ", s) # replace any successive whitespace characters with a space + + return s diff --git a/src/transformers/models/whisper/feature_extraction_whisper.py b/src/transformers/models/whisper/feature_extraction_whisper.py new file mode 100644 index 00000000000000..ce5de7b65afa81 --- /dev/null +++ b/src/transformers/models/whisper/feature_extraction_whisper.py @@ -0,0 +1,305 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Feature extractor class for Whisper +""" + +from typing import List, Optional, Union + +import numpy as np +from numpy.fft import fft + +from ...feature_extraction_sequence_utils import SequenceFeatureExtractor +from ...feature_extraction_utils import BatchFeature +from ...utils import TensorType, logging + + +logger = logging.get_logger(__name__) + + +class WhisperFeatureExtractor(SequenceFeatureExtractor): + r""" + Constructs a Whisper feature extractor. + + This feature extractor inherits from [`WhisperFeatureExtractor`] which contains most of the main methods. Users + should refer to this superclass for more information regarding those methods. + + This class extracts mel-filter bank features from raw speech using a custom numpy implementation of the `Short Time + Fourier Transform` which should match pytorch's `torch.stft` equivalent. + + Args: + feature_size (`int`, defaults to 80): + The feature dimension of the extracted features. + sampling_rate (`int`, defaults to 16000): + The sampling rate at which the audio files should be digitalized expressed in Hertz per second (Hz). + hop_length (`int`, defaults to 160): + Length of the overlaping windows for the STFT used to obtain the Mel Frequency coefficients. + chunk_length (`int`, defaults to 30): + The maximum number of chuncks of `sampling_rate` samples used to trim and pad longer or shorter audio + sequences. + n_fft (`int`, defaults to 400): + Size of the Fourier transform. + padding_value (`float`, *optional*, defaults to 0.0): + Padding value used to pad the audio. Should correspond to silences. + """ + + model_input_names = ["input_features"] + + def __init__( + self, + feature_size=80, + sampling_rate=16000, + hop_length=160, + chunk_length=30, + n_fft=400, + padding_value=0.0, + **kwargs + ): + super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs) + self.n_fft = n_fft + self.hop_length = hop_length + self.chunk_length = chunk_length + self.return_attention_mask = True + self.n_samples = chunk_length * sampling_rate + self.nb_max_frames = self.n_samples // hop_length + self.sampling_rate = sampling_rate + self.mel_filters = self.get_mel_filters(sampling_rate, n_fft, n_mels=feature_size) + + def get_mel_filters(self, sr, n_fft, n_mels=128, dtype=np.float32): + # Initialize the weights + n_mels = int(n_mels) + weights = np.zeros((n_mels, int(1 + n_fft // 2)), dtype=dtype) + + # Center freqs of each FFT bin + fftfreqs = np.fft.rfftfreq(n=n_fft, d=1.0 / sr) + + # 'Center freqs' of mel bands - uniformly spaced between limits + min_mel = 0.0 + max_mel = 45.245640471924965 + + mels = np.linspace(min_mel, max_mel, n_mels + 2) + + mels = np.asanyarray(mels) + + # Fill in the linear scale + f_min = 0.0 + f_sp = 200.0 / 3 + freqs = f_min + f_sp * mels + + # And now the nonlinear scale + min_log_hz = 1000.0 # beginning of log region (Hz) + min_log_mel = (min_log_hz - f_min) / f_sp # same (Mels) + logstep = np.log(6.4) / 27.0 # step size for log region + + # If we have vector data, vectorize + log_t = mels >= min_log_mel + freqs[log_t] = min_log_hz * np.exp(logstep * (mels[log_t] - min_log_mel)) + + mel_f = freqs + + fdiff = np.diff(mel_f) + ramps = np.subtract.outer(mel_f, fftfreqs) + + for i in range(n_mels): + # lower and upper slopes for all bins + lower = -ramps[i] / fdiff[i] + upper = ramps[i + 2] / fdiff[i + 1] + + # .. then intersect them with each other and zero + weights[i] = np.maximum(0, np.minimum(lower, upper)) + + # Slaney-style mel is scaled to be approx constant energy per channel + enorm = 2.0 / (mel_f[2 : n_mels + 2] - mel_f[:n_mels]) + weights *= enorm[:, np.newaxis] + + return weights + + def fram_wave(self, waveform, center=True): + """ + Transform a raw waveform into a list of smaller waveforms. The window length defines how much of the signal is + contain in each frame (smalle waveform), while the hope length defines the step between the beginning of each + new frame. + + Centering is done by reflecting the waveform which is first centered around `frame_idx * hop_length`. + """ + frames = [] + for i in range(0, waveform.shape[0] + 1, self.hop_length): + half_window = (self.n_fft - 1) // 2 + 1 + if center: + start = i - half_window if i > half_window else 0 + end = i + half_window if i < waveform.shape[0] - half_window else waveform.shape[0] + + frame = waveform[start:end] + + if start == 0: + padd_width = (-i + half_window, 0) + frame = np.pad(frame, pad_width=padd_width, mode="reflect") + + elif end == waveform.shape[0]: + padd_width = (0, (i - waveform.shape[0] + half_window)) + frame = np.pad(frame, pad_width=padd_width, mode="reflect") + + else: + frame = waveform[i : i + self.n_fft] + frame_width = frame.shape[0] + if frame_width < waveform.shape[0]: + frame = np.lib.pad( + frame, pad_width=(0, self.n_fft - frame_width), mode="constant", constant_values=0 + ) + + frames.append(frame) + return np.stack(frames, 0) + + def stft(self, frames, window): + """ + Calculates the complex Short-Time Fourier Transform (STFT) of the given framed signal. Should give the same + results as `torch.stft`. + """ + frame_size = frames.shape[1] + fft_size = self.n_fft + + if fft_size is None: + fft_size = frame_size + + if fft_size < frame_size: + raise ValueError("FFT size must greater or equal the frame size") + # number of FFT bins to store + num_fft_bins = (fft_size >> 1) + 1 + + data = np.empty((len(frames), num_fft_bins), dtype=np.complex64) + fft_signal = np.zeros(fft_size) + + for f, frame in enumerate(frames): + if window is not None: + np.multiply(frame, window, out=fft_signal[:frame_size]) + else: + fft_signal[:frame_size] = frame + data[f] = fft(fft_signal, axis=0)[:num_fft_bins] + return data.T + + def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray: + """ + Compute the log-Mel spectrogram of the provided audio, gives similar results whisper's original torch + implementation with 1e-5 tolerance. + """ + window = np.hanning(self.n_fft + 1)[:-1] + + frames = self.fram_wave(waveform) + stft = self.stft(frames, window=window) + magnitudes = np.abs(stft[:, :-1]) ** 2 + + filters = self.mel_filters + mel_spec = filters @ magnitudes + + log_spec = np.log10(np.clip(mel_spec, a_min=1e-10, a_max=None)) + log_spec = np.maximum(log_spec, log_spec.max() - 8.0) + log_spec = (log_spec + 4.0) / 4.0 + + return log_spec + + def __call__( + self, + raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], + truncation: bool = True, + pad_to_multiple_of: Optional[int] = None, + return_tensors: Optional[Union[str, TensorType]] = None, + return_attention_mask: Optional[bool] = None, + padding: Optional[str] = "max_length", + max_length: Optional[int] = None, + **kwargs + ) -> BatchFeature: + """ + Main method to featurize and prepare for the model one or several sequence(s). sequences. + + Args: + raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`): + The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float + values, a list of numpy arrays or a list of list of float values. + truncation (`bool`, *optional*, default to `True`): + Activates truncation to cut input sequences longer than *max_length* to *max_length*. + pad_to_multiple_of (`int`, *optional*, defaults to None): + If set will pad the sequence to a multiple of the provided value. + + This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability + >= 7.5 (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. + return_attention_mask (`bool`, *optional*): + Whether to return the attention mask. If left to the default, will return the attention mask according + to the specific feature_extractor's default. + + [What are attention masks?](../glossary#attention-mask) + + + + For WhisperTransoformer models, `attention_mask` should alwys be passed for batched inference, to avoid + subtle bugs. + + + + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors instead of list of python integers. Acceptable values are: + + - `'tf'`: Return TensorFlow `tf.constant` objects. + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return Numpy `np.ndarray` objects. + sampling_rate (`int`, *optional*): + The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass + `sampling_rate` at the forward call to prevent silent errors. + padding_value (`float`, defaults to 0.0): + The value that is used to fill the padding values / vectors. + """ + + is_batched = bool( + isinstance(raw_speech, (list, tuple)) + and (isinstance(raw_speech[0], np.ndarray) or isinstance(raw_speech[0], (tuple, list))) + ) + + if is_batched: + raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech] + elif not is_batched and not isinstance(raw_speech, np.ndarray): + raw_speech = np.asarray(raw_speech, dtype=np.float32) + elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64): + raw_speech = raw_speech.astype(np.float32) + + # always return batch + if not is_batched: + raw_speech = [np.asarray([raw_speech]).T] + + batched_speech = BatchFeature({"input_features": raw_speech}) + + # convert into correct format for padding + + padded_inputs = self.pad( + batched_speech, + padding=padding, + max_length=max_length if max_length else self.n_samples, + truncation=truncation, + pad_to_multiple_of=pad_to_multiple_of, + return_attention_mask=False, + **kwargs, + ) + # make sure list is in array format + input_features = padded_inputs.get("input_features").transpose(2, 0, 1) + + input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]] + + if isinstance(input_features[0], List): + padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features] + else: + padded_inputs["input_features"] = input_features + + if return_tensors is not None: + padded_inputs = padded_inputs.convert_to_tensors(return_tensors) + + return padded_inputs diff --git a/src/transformers/models/whisper/modeling_whisper.py b/src/transformers/models/whisper/modeling_whisper.py new file mode 100644 index 00000000000000..ef23914b8ce77d --- /dev/null +++ b/src/transformers/models/whisper/modeling_whisper.py @@ -0,0 +1,1230 @@ +# coding=utf-8 +# Copyright 2022 The OpenAI Authors and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch Whisper model.""" + + +import math +import random +from typing import Optional, Tuple + +import torch +from torch import nn +from torch.nn import CrossEntropyLoss + +from ...activations import ACT2FN +from ...modeling_outputs import ( + BaseModelOutput, + BaseModelOutputWithPastAndCrossAttentions, + Seq2SeqLMOutput, + Seq2SeqModelOutput, +) +from ...modeling_utils import PreTrainedModel +from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings +from .configuration_whisper import WhisperConfig + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "WhisperConfig" + + +WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "openai/whisper-base", + # See all Whisper models at https://huggingface.co/models?filter=whisper +] + + +# Copied from transformers.models.bart.modeling_bart.shift_tokens_right +def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): + """ + Shift input ids one token to the right. + """ + shifted_input_ids = input_ids.new_zeros(input_ids.shape) + shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() + shifted_input_ids[:, 0] = decoder_start_token_id + + if pad_token_id is None: + raise ValueError("self.model.config.pad_token_id has to be defined.") + # replace possible -100 values in labels by `pad_token_id` + shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) + + return shifted_input_ids + + +# Copied from transformers.models.bart.modeling_bart._make_causal_mask +def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0): + """ + Make causal mask used for bi-directional self-attention. + """ + bsz, tgt_len = input_ids_shape + mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min)) + mask_cond = torch.arange(mask.size(-1)) + mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) + mask = mask.to(dtype) + + if past_key_values_length > 0: + mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1) + return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) + + +# Copied from transformers.models.bart.modeling_bart._expand_mask +def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. + """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + + +class WhisperPositionalEmbedding(nn.Embedding): + def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None): + super().__init__(num_positions, embedding_dim) + + def forward(self, input_ids, past_key_values_length=0): + + return self.weight[past_key_values_length : past_key_values_length + input_ids.shape[-1]] + + +class WhisperAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + is_decoder: bool = False, + bias: bool = True, + ): + super().__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + + if (self.head_dim * num_heads) != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" + f" and `num_heads`: {num_heads})." + ) + self.scaling = self.head_dim**-0.5 + self.is_decoder = is_decoder + + self.k_proj = nn.Linear(embed_dim, embed_dim, bias=False) + self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + + # Copied from transformers.models.bart.modeling_bart.BartAttention._shape with BART->whisper + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + # Copied from transformers.models.bart.modeling_bart.BartAttention.forward with BART->whisper + def forward( + self, + hidden_states: torch.Tensor, + key_value_states: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + + bsz, tgt_len, _ = hidden_states.size() + + # get query proj + query_states = self.q_proj(hidden_states) * self.scaling + # get key, value proj + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_states = past_key_value[0] + value_states = past_key_value[1] + elif is_cross_attention: + # cross_attentions + key_states = self._shape(self.k_proj(key_value_states), -1, bsz) + value_states = self._shape(self.v_proj(key_value_states), -1, bsz) + elif past_key_value is not None: + # reuse k, v, self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + else: + # self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states, value_states) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) + key_states = key_states.view(*proj_shape) + value_states = value_states.view(*proj_shape) + + src_len = key_states.size(1) + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if layer_head_mask is not None: + if layer_head_mask.size() != (self.num_heads,): + raise ValueError( + f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" + f" {layer_head_mask.size()}" + ) + attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + if output_attentions: + # this operation is a bit awkward, but it's required to + # make sure that attn_weights keeps its gradient. + # In order to do so, attn_weights have to be reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + + # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be + # partitioned aross GPUs when using tensor-parallelism. + attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped, past_key_value + + +# Copied from transformers.models.speech_to_text.modeling_speech_to_text.Speech2TextEncoderLayer with Speech2Text->Whisper +class WhisperEncoderLayer(nn.Module): + def __init__(self, config: WhisperConfig): + super().__init__() + self.embed_dim = config.d_model + self.self_attn = WhisperAttention( + embed_dim=self.embed_dim, + num_heads=config.encoder_attention_heads, + dropout=config.attention_dropout, + ) + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) + self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: torch.Tensor, + layer_head_mask: torch.Tensor, + output_attentions: bool = False, + ): + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` + attention_mask (`torch.FloatTensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size + `(config.encoder_attention_heads,)`. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + """ + residual = hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + hidden_states, attn_weights, _ = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + + residual = hidden_states + hidden_states = self.final_layer_norm(hidden_states) + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + + if hidden_states.dtype == torch.float16 and ( + torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() + ): + clamp_value = torch.finfo(hidden_states.dtype).max - 1000 + hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +# Copied from transformers.models.speech_to_text.modeling_speech_to_text.Speech2TextDecoderLayer with Speech2Text->Whisper +class WhisperDecoderLayer(nn.Module): + def __init__(self, config: WhisperConfig): + super().__init__() + self.embed_dim = config.d_model + + self.self_attn = WhisperAttention( + embed_dim=self.embed_dim, + num_heads=config.decoder_attention_heads, + dropout=config.attention_dropout, + is_decoder=True, + ) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.encoder_attn = WhisperAttention( + self.embed_dim, + config.decoder_attention_heads, + dropout=config.attention_dropout, + is_decoder=True, + ) + self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) + self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + cross_attn_layer_head_mask: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = True, + ): + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` + attention_mask (`torch.FloatTensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + encoder_hidden_states (`torch.FloatTensor`): + cross attention input to the layer of shape `(seq_len, batch, embed_dim)` + encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size + `(encoder_attention_heads,)`. + cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of + size *(decoder_attention_heads,)*. + past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + """ + residual = hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + # Self Attention + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + # add present self-attn cache to positions 1,2 of present_key_value tuple + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + past_key_value=self_attn_past_key_value, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + + # Cross-Attention Block + cross_attn_present_key_value = None + cross_attn_weights = None + if encoder_hidden_states is not None: + residual = hidden_states + hidden_states = self.encoder_attn_layer_norm(hidden_states) + + # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( + hidden_states=hidden_states, + key_value_states=encoder_hidden_states, + attention_mask=encoder_attention_mask, + layer_head_mask=cross_attn_layer_head_mask, + past_key_value=cross_attn_past_key_value, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + + # add cross-attn to positions 3,4 of present_key_value tuple + present_key_value = present_key_value + cross_attn_present_key_value + + # Fully Connected + residual = hidden_states + hidden_states = self.final_layer_norm(hidden_states) + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights, cross_attn_weights) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +class WhisperPreTrainedModel(PreTrainedModel): + config_class = WhisperConfig + base_model_prefix = "model" + main_input_name = "input_features" + supports_gradient_checkpointing = True + + def _init_weights(self, module): + std = self.config.init_std + if isinstance(module, (nn.Linear, nn.Conv1d)): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, (WhisperDecoder, WhisperEncoder)): + module.gradient_checkpointing = value + + def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor): + """ + Computes the output length of the convolutional layers + """ + input_lengths = (input_lengths - 1) // 2 + 1 + + return input_lengths + + +WHISPER_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`WhisperConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +WHISPER_INPUTS_DOCSTRING = r""" + Args: + input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, sequence_length)`): + Float values mel features extracted from the raw speech waveform. Raw speech waveform can be obtained by + loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via + the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the + [`WhisperFeatureExtractor`] should be used for extracting the mel features, padding and conversion into a + tensor of type `torch.FloatTensor`. See [`~WhisperFeatureExtractor.__call__`] + decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Indices of decoder input sequence tokens in the vocabulary. + + Indices can be obtained using [`WhisperTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are decoder input IDs?](../glossary#decoder-input-ids) + + Whisper uses the `decoder_start_token_id` as the starting token for `decoder_input_ids` generation. If + `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also + be used by default. + + If you want to change padding behavior, you should read + [`modeling_whisper._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the BART + paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. + head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): + Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) + `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of + hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. decoder_inputs_embeds (`torch.FloatTensor` of + shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing + `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is + used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is + useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + + If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value + of `inputs_embeds`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +class WhisperEncoder(WhisperPreTrainedModel): + """ + Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a + [`WhisperEncoderLayer`]. + + Args: + config: WhisperConfig + embed_tokens (nn.Embedding): output embedding + """ + + def __init__(self, config: WhisperConfig): + super().__init__(config) + self.dropout = config.dropout + self.layerdrop = config.encoder_layerdrop + + embed_dim = config.d_model + self.num_mel_bins = config.num_mel_bins + self.padding_idx = config.pad_token_id + self.max_source_positions = config.max_source_positions + self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 + + self.conv1 = nn.Conv1d(self.num_mel_bins, embed_dim, kernel_size=3, padding=1) + self.conv2 = nn.Conv1d(embed_dim, embed_dim, kernel_size=3, stride=2, padding=1) + + self.embed_positions = nn.Embedding(self.max_source_positions, embed_dim) + + self.layers = nn.ModuleList([WhisperEncoderLayer(config) for _ in range(config.encoder_layers)]) + self.layer_norm = nn.LayerNorm(config.d_model) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def forward( + self, + input_features, + head_mask=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Args: + input_features (`torch.LongTensor` of shape `(batch_size, feature_size, sequence_length)`): + Float values of mel features extracted from the raw speech waveform. Raw speech waveform can be + obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a + `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into + `input_features`, the [`WhisperFeatureExtractor`] should be used for extracting the mel features, + padding and conversion into a tensor of type `torch.FloatTensor`. See + [`~WhisperFeatureExtractor.__call__`] + head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + inputs_embeds = nn.functional.gelu(self.conv1(input_features)) + inputs_embeds = nn.functional.gelu(self.conv2(inputs_embeds)) + + inputs_embeds = inputs_embeds.permute(0, 2, 1) + embed_pos = self.embed_positions.weight + + hidden_states = inputs_embeds + embed_pos + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + # check if head_mask has a correct number of layers specified if desired + if head_mask is not None: + assert head_mask.size()[0] == ( + len(self.layers) + ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}." + + for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + dropout_probability = random.uniform(0, 1) + if self.training and (dropout_probability < self.layerdrop): # skip the layer + layer_outputs = (None, None) + else: + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(encoder_layer), + hidden_states, + None, + (head_mask[idx] if head_mask is not None else None), + ) + else: + layer_outputs = encoder_layer( + hidden_states, + None, + layer_head_mask=(head_mask[idx] if head_mask is not None else None), + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + hidden_states = self.layer_norm(hidden_states) + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions + ) + + +class WhisperDecoder(WhisperPreTrainedModel): + """ + Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`WhisperDecoderLayer`] + + Args: + config: WhisperConfig + """ + + def __init__(self, config: WhisperConfig): + super().__init__(config) + self.dropout = config.dropout + self.layerdrop = config.decoder_layerdrop + self.padding_idx = config.pad_token_id + self.max_target_positions = config.max_target_positions + self.max_source_positions = config.max_source_positions + self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 + + self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx) + self.embed_positions = WhisperPositionalEmbedding(self.max_target_positions, config.d_model) + + self.layers = nn.ModuleList([WhisperDecoderLayer(config) for _ in range(config.decoder_layers)]) + + self.layer_norm = nn.LayerNorm(config.d_model) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + combined_attention_mask = None + + if input_shape[-1] > 1: + combined_attention_mask = _make_causal_mask( + input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length + ).to(inputs_embeds.device) + + if attention_mask is not None: + if attention_mask.shape[-1] > input_shape[-1] > 0: + attention_mask = attention_mask[:, : input_shape[-1] + past_key_values_length] + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) + combined_attention_mask = ( + expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask + ) + + return combined_attention_mask + + def forward( + self, + input_ids=None, + attention_mask=None, + encoder_hidden_states=None, + head_mask=None, + cross_attn_head_mask=None, + past_key_values=None, + inputs_embeds=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`WhisperTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention + of the decoder. + head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention + on hidden heads. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of + shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the + cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those + that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of + all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of + shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing + `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more + control over how to convert `input_ids` indices into associated vectors than the model's internal + embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + + attention_mask = self._prepare_decoder_attention_mask( + attention_mask, input_shape, inputs_embeds, past_key_values_length + ) + + # embed positions + positions = self.embed_positions(input_ids, past_key_values_length=past_key_values_length) + + hidden_states = inputs_embeds + positions + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None + next_decoder_cache = () if use_cache else None + + # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired + for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): + if attn_mask is not None: + assert attn_mask.size()[0] == (len(self.layers)), ( + f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" + f" {head_mask.size()[0]}." + ) + for idx, decoder_layer in enumerate(self.layers): + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + if output_hidden_states: + all_hidden_states += (hidden_states,) + dropout_probability = random.uniform(0, 1) + if self.training and (dropout_probability < self.layerdrop): + continue + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + if use_cache: + logger.warning( + "`use_cache = True` is incompatible with gradient checkpointing. Setting `use_cache =" + " False`..." + ) + use_cache = False + + def create_custom_forward(module): + def custom_forward(*inputs): + # None for past_key_value + return module(*inputs, output_attentions, use_cache) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(decoder_layer), + hidden_states, + attention_mask, + encoder_hidden_states, + head_mask[idx] if head_mask is not None else None, + cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, + None, + ) + else: + + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + layer_head_mask=(head_mask[idx] if head_mask is not None else None), + cross_attn_layer_head_mask=( + cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None + ), + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + if encoder_hidden_states is not None: + all_cross_attentions += (layer_outputs[2],) + + hidden_states = self.layer_norm(hidden_states) + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + if not return_dict: + return tuple( + v + for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + cross_attentions=all_cross_attentions, + ) + + +@add_start_docstrings( + "The bare Whisper Model outputting raw hidden-states without any specific head on top.", + WHISPER_START_DOCSTRING, +) +class WhisperModel(WhisperPreTrainedModel): + _keys_to_ignore_on_load_missing = [r"proj_out.weight"] + + def __init__(self, config: WhisperConfig): + super().__init__(config) + + self.encoder = WhisperEncoder(config) + self.decoder = WhisperDecoder(config) + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.decoder.embed_tokens + + def set_input_embeddings(self, value): + self.decoder.embed_tokens = value + + def get_encoder(self): + return self.encoder + + def get_decoder(self): + return self.decoder + + @add_start_docstrings_to_model_forward(WHISPER_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_features=None, + decoder_input_ids=None, + decoder_attention_mask=None, + head_mask=None, + decoder_head_mask=None, + cross_attn_head_mask=None, + encoder_outputs=None, + past_key_values=None, + decoder_inputs_embeds=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + Returns: + + Example: + + ```python + >>> import torch + >>> from transformers import WhisperModel, WhisperFeatureExtractor + >>> from datasets import load_dataset + + >>> model = WhisperModel.from_pretrained("openai/whisper-base") + >>> feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-base") + >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + >>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt") + >>> input_features = inputs.input_features + >>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id + >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state + >>> list(last_hidden_state.shape) + [1, 2, 512] + ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if encoder_outputs is None: + encoder_outputs = self.encoder( + input_features, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True + elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): + encoder_outputs = BaseModelOutput( + last_hidden_state=encoder_outputs[0], + hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, + attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, + ) + + # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) + decoder_outputs = self.decoder( + input_ids=decoder_input_ids, + attention_mask=decoder_attention_mask, + encoder_hidden_states=encoder_outputs[0], + head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + if not return_dict: + return decoder_outputs + encoder_outputs + + return Seq2SeqModelOutput( + last_hidden_state=decoder_outputs.last_hidden_state, + past_key_values=decoder_outputs.past_key_values, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + ) + + +@add_start_docstrings( + "The Whisper Model with a language modeling head. Can be used for automatic speech recognition.", + WHISPER_START_DOCSTRING, +) +class WhisperForConditionalGeneration(WhisperPreTrainedModel): + base_model_prefix = "model" + _keys_to_ignore_on_load_missing = [ + r"encoder.version", + r"decoder.version", + r"proj_out.weight", + ] + _keys_to_ignore_on_save = [ + r"proj_out.weight", + ] + + def __init__(self, config: WhisperConfig): + super().__init__(config) + self.model = WhisperModel(config) + self.proj_out = nn.Linear(config.d_model, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_encoder(self): + return self.model.get_encoder() + + def get_decoder(self): + return self.model.get_decoder() + + def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding: + new_embeddings = super().resize_token_embeddings(new_num_tokens) + return new_embeddings + + def get_output_embeddings(self): + return self.proj_out + + def set_output_embeddings(self, new_embeddings): + self.proj_out = new_embeddings + + @add_start_docstrings_to_model_forward(WHISPER_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_features=None, + decoder_input_ids=None, + decoder_attention_mask=None, + head_mask=None, + decoder_head_mask=None, + cross_attn_head_mask=None, + encoder_outputs=None, + past_key_values=None, + decoder_inputs_embeds=None, + labels=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + ): + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` + or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is + only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + Example: + + ```python + >>> import torch + >>> from transformers import WhisperProcessor, WhisperForConditionalGeneration + >>> from datasets import load_dataset + + >>> processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") + >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") + + >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + + >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="pt") + >>> input_features = inputs.input_features + + >>> generated_ids = model.generate(inputs=input_features) + + >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] + >>> transcription + ' Mr. Quilter is the apostle of the middle classes, and we are glad to' + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if labels is not None: + if decoder_input_ids is None: + decoder_input_ids = shift_tokens_right( + labels, self.config.pad_token_id, self.config.decoder_start_token_id + ) + + outputs = self.model( + input_features, + decoder_input_ids=decoder_input_ids, + encoder_outputs=encoder_outputs, + decoder_attention_mask=decoder_attention_mask, + head_mask=head_mask, + decoder_head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + decoder_inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + lm_logits = self.proj_out(outputs[0]) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (lm_logits,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return Seq2SeqLMOutput( + loss=loss, + logits=lm_logits, + past_key_values=outputs.past_key_values, + decoder_hidden_states=outputs.decoder_hidden_states, + decoder_attentions=outputs.decoder_attentions, + cross_attentions=outputs.cross_attentions, + encoder_last_hidden_state=outputs.encoder_last_hidden_state, + encoder_hidden_states=outputs.encoder_hidden_states, + encoder_attentions=outputs.encoder_attentions, + ) + + def prepare_inputs_for_generation( + self, decoder_input_ids, past=None, use_cache=None, encoder_outputs=None, attention_mask=None, **kwargs + ): + # cut decoder_input_ids if past is used + if past is not None: + decoder_input_ids = decoder_input_ids[:, -1:] + + return { + "encoder_outputs": encoder_outputs, + "past_key_values": past, + "decoder_input_ids": decoder_input_ids, + "use_cache": use_cache, + "decoder_attention_mask": None, + } + + # + @staticmethod + def _reorder_cache(past, beam_idx): + reordered_past = () + for layer_past in past: + reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) + return reordered_past diff --git a/src/transformers/models/whisper/processing_whisper.py b/src/transformers/models/whisper/processing_whisper.py new file mode 100644 index 00000000000000..3bdcb0f51f2f56 --- /dev/null +++ b/src/transformers/models/whisper/processing_whisper.py @@ -0,0 +1,124 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Speech processor class for Whisper +""" + +from ...processing_utils import ProcessorMixin + + +class WhisperProcessor(ProcessorMixin): + r""" + Constructs a Whisper processor which wraps a Whisper feature extractor and a Whisper tokenizer into a single + processor. + + [`WhisperProcessor`] offers all the functionalities of [`WhisperFeatureExtractor`] and [`WhisperTokenizer`]. See + the [`~WhisperProcessor.__call__`] and [`~WhisperProcessor.decode`] for more information. + + Args: + feature_extractor (`WhisperFeatureExtractor`): + An instance of [`WhisperFeatureExtractor`]. The feature extractor is a required input. + tokenizer (`WhisperTokenizer`): + An instance of [`WhisperTokenizer`]. The tokenizer is a required input. + """ + feature_extractor_class = "WhisperFeatureExtractor" + tokenizer_class = "WhisperTokenizer" + + def __init__(self, feature_extractor, tokenizer): + super().__init__(feature_extractor, tokenizer) + self.current_processor = self.feature_extractor + self._in_target_context_manager = False + + def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True): + forced_decoder_tokens = "" + + if language is not None: + if f"<|{language}|>" not in self.tokenizer.additional_special_tokens: + raise ValueError( + f"{language} is not supported. The language should be one of the following: '<|en|>'," + " '<|zh|>', '<|de|>', '<|es|>', '<|ru|>', '<|ko|>', '<|fr|>', '<|ja|>', '<|pt|>', '<|tr|>'," + " '<|pl|>', '<|ca|>', '<|nl|>', '<|ar|>', '<|sv|>', '<|it|>', '<|id|>', '<|hi|>', '<|fi|>'," + " '<|vi|>', '<|iw|>', '<|uk|>', '<|el|>', '<|ms|>', '<|cs|>', '<|ro|>', '<|da|>', '<|hu|>'," + " '<|ta|>', '<|no|>', '<|th|>', '<|ur|>', '<|hr|>', '<|bg|>', '<|lt|>', '<|la|>', '<|mi|>'," + " '<|ml|>', '<|cy|>', '<|sk|>', '<|te|>', '<|fa|>', '<|lv|>', '<|bn|>', '<|sr|>', '<|az|>'," + " '<|sl|>', '<|kn|>', '<|et|>', '<|mk|>', '<|br|>', '<|eu|>', '<|is|>', '<|hy|>', '<|ne|>'," + " '<|mn|>', '<|bs|>', '<|kk|>', '<|sq|>', '<|sw|>', '<|gl|>', '<|mr|>', '<|pa|>', '<|si|>'," + " '<|km|>', '<|sn|>', '<|yo|>', '<|so|>', '<|af|>', '<|oc|>', '<|ka|>', '<|be|>', '<|tg|>'," + " '<|sd|>', '<|gu|>', '<|am|>', '<|yi|>', '<|lo|>', '<|uz|>', '<|fo|>', '<|ht|>', '<|ps|>'," + " '<|tk|>', '<|nn|>', '<|mt|>', '<|sa|>', '<|lb|>', '<|my|>', '<|bo|>', '<|tl|>', '<|mg|>'," + " '<|as|>', '<|tt|>', '<|haw|>', '<|ln|>', '<|ha|>', '<|ba|>', '<|jw|>', '<|su|>'" + ) + forced_decoder_tokens += f"<|{language}|>" + + if task is not None: + if f"<|{task}|>" not in self.tokenizer.additional_special_tokens: + raise ValueError( + f"'{task}' is not supported. The language should be in : {{'transcribe', 'translate'}}" + ) + forced_decoder_tokens += f"<|{task}|>" + + forced_decoder_tokens += "<|notimestamps|>" if no_timestamps else "" + ids = self.tokenizer.encode(forced_decoder_tokens) + forced_decoder_ids = [(rank + 1, token) for rank, token in enumerate(ids)] + return forced_decoder_ids + + def __call__(self, *args, **kwargs): + """ + When used in normal mode, this method forwards all its arguments to WhisperFeatureExtractor's + [`~WhisperFeatureExtractor.__call__`] and returns its output. If used in the context + [`~WhisperProcessor.as_target_processor`] this method forwards all its arguments to WhisperTokenizer's + [`~WhisperTokenizer.__call__`]. Please refer to the doctsring of the above two methods for more information. + + """ + # For backward compatibility + if self._in_target_context_manager: + return self.current_processor(*args, **kwargs) + + audio = kwargs.pop("audio", None) + text = kwargs.pop("text", None) + if len(args) > 0: + audio = args[0] + args = args[1:] + + if audio is None and text is None: + raise ValueError("You need to specify either an `audio` or `text` input to process.") + + if audio is not None: + inputs = self.feature_extractor(audio, *args, **kwargs) + if text is not None: + encodings = self.tokenizer(text, **kwargs) + + if text is None: + return inputs + + elif audio is None: + return encodings + else: + inputs["labels"] = encodings["input_ids"] + return inputs + + def batch_decode(self, *args, **kwargs): + """ + This method forwards all its arguments to WhisperTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please + refer to the docstring of this method for more information. + """ + return self.tokenizer.batch_decode(*args, **kwargs) + + def decode(self, *args, **kwargs): + """ + This method forwards all its arguments to WhisperTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to + the docstring of this method for more information. + """ + return self.tokenizer.decode(*args, **kwargs) diff --git a/src/transformers/models/whisper/tokenization_whisper.py b/src/transformers/models/whisper/tokenization_whisper.py new file mode 100644 index 00000000000000..696aa4f4e5f0f9 --- /dev/null +++ b/src/transformers/models/whisper/tokenization_whisper.py @@ -0,0 +1,401 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tokenization classes for Whisper.""" +import json +import os +from typing import List, Optional, Tuple, Union + +import regex as re + +from ...tokenization_utils import AddedToken, PreTrainedTokenizer +from ...utils import logging +from .english_normalizer import EnglishTextNormalizer + + +VOCAB_FILES_NAMES = { + "vocab_file": "vocab.json", + "tokenizer_file": "tokenizer.json", + "merges_file": "merges.txt", + "normalizer_file": "normalizer.json", +} + +PRETRAINED_VOCAB_FILES_MAP = { + "vocab_file": { + "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/vocab.json", + }, + "merges_file": {"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/merges_file.txt"}, + "normalizer_file": { + "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/normalizer.json" + }, +} + +MAX_MODEL_INPUT_SIZES = { + "openai/whisper-base": 448, +} + + +# Copied from transformers.models.gpt2.tokenization_gpt2.bytes_to_unicode +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control + characters the bpe code barfs on. + + The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab + if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for + decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup + tables between utf-8 bytes and unicode strings. + """ + bs = ( + list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) + ) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8 + n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +logger = logging.get_logger(__name__) + + +# Copied from transformers.models.gpt2.tokenization_gpt2.get_pairs +def get_pairs(word): + """ + Return set of symbol pairs in a word. + + Word is represented as tuple of symbols (symbols being variable-length strings). + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +class WhisperTokenizer(PreTrainedTokenizer): + """ + Construct an Whisper tokenizer. + + This tokenizer inherits from [`PreTrainedTokenizer`] which contains some of the main methods. Users should refer to + the superclass for more information regarding such methods. + + Args: + vocab_file (`str`): + Path to the vocabulary file. + merges_file (`str`): + Path to the merges file. + normalizer_file (`str`, *optional*, defaults to `None`): + Path to the normalizer_file file. + errors (`str`, *optional*, defaults to `"replace"`): + Paradigm to follow when decoding bytes to UTF-8. See + [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. + unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The beginning of sequence token. + eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`): + The end of sequence token. + add_prefix_space (`bool`, *optional*, defaults to `False`): + Whether or not to add an initial space to the input. This allows to treat the leading word just as any + other word. + add_bos_token (`bool`, *optional*, defaults to `False`): + Whether or not to add an initial <|endoftext|> to the input. This allows to treat the leading word just as + any other word. + """ + + vocab_files_names = VOCAB_FILES_NAMES + pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP + max_model_input_sizes = MAX_MODEL_INPUT_SIZES + model_input_names = ["input_ids", "attention_mask"] + + def __init__( + self, + vocab_file, + merges_file, + normalizer_file=None, + errors="replace", + unk_token="<|endoftext|>", + bos_token="<|endoftext|>", + eos_token="<|endoftext|>", + pad_token=None, + add_prefix_space=False, + add_bos_token=False, + **kwargs + ): + + bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token + eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token + unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token + pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token + super().__init__( + errors=errors, + unk_token=unk_token, + bos_token=bos_token, + eos_token=eos_token, + pad_token=pad_token, + add_prefix_space=add_prefix_space, + add_bos_token=add_bos_token, + **kwargs, + ) + self.add_bos_token = add_bos_token + + with open(vocab_file, encoding="utf-8") as vocab_handle: + self.encoder = json.load(vocab_handle) + self.decoder = {v: k for k, v in self.encoder.items()} + self.errors = errors # how to handle errors in decoding + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + with open(merges_file, encoding="utf-8") as merges_handle: + bpe_merges = merges_handle.read().split("\n")[1:-1] + bpe_merges = [tuple(merge.split()) for merge in bpe_merges] + self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) + self.cache = {} + self.add_prefix_space = add_prefix_space + + if normalizer_file is not None: + with open(normalizer_file, encoding="utf-8") as vocab_handle: + self.english_spelling_normalizer = json.load(vocab_handle) + else: + self.english_spelling_normalizer = None + + # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions + self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") + + def get_vocab(self): + vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} + vocab.update(self.added_tokens_encoder) + return vocab + + @property + def vocab_size(self) -> int: + return len(self.encoder) + + # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.bpe with GPT2 -> Whisper + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token) + pairs = get_pairs(word) + + if not pairs: + return token + + while True: + bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + except ValueError: + new_word.extend(word[i:]) + break + else: + new_word.extend(word[i:j]) + i = j + + if word[i] == first and i < len(word) - 1 and word[i + 1] == second: + new_word.append(first + second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = " ".join(word) + self.cache[token] = word + return word + + # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.build_inputs_with_special_tokens with GPT2 -> Whisper + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): + if self.add_bos_token: + bos_token_ids = [self.bos_token_id] + else: + bos_token_ids = [] + + output = bos_token_ids + token_ids_0 + + if token_ids_1 is None: + return output + + return output + bos_token_ids + token_ids_1 + + # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.get_special_tokens_mask with GPT2 -> Whisper + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + if not self.add_bos_token: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=False + ) + + if token_ids_1 is None: + return [1] + ([0] * len(token_ids_0)) + return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + + # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._tokenize with GPT2 -> Whisper + def _tokenize(self, text): + """Tokenize a string.""" + bpe_tokens = [] + for token in re.findall(self.pat, text): + token = "".join( + self.byte_encoder[b] for b in token.encode("utf-8") + ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) + bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" ")) + return bpe_tokens + + # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_token_to_id with GPT2 -> Whisper + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.encoder.get(token, self.encoder.get(self.unk_token)) + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.decoder.get(index, self.decoder.get(self.unk_token_id)) + + def _normalize(self, text): + """ + Normalize a given string using the `EnglishTextNormalizer` class, which preforms commons transformation on + english text. + """ + normalizer = EnglishTextNormalizer(self.english_spelling_normalizer) + return normalizer(text) + + def _decode( + self, token_ids: Union[int, List[int]], skip_special_tokens: bool = False, normalize: bool = False, **kwargs + ) -> str: + self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False) + + filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens) + + # To avoid mixing byte-level and unicode for byte-level BPT + # we need to build string separately for added tokens and byte-level tokens + # cf. https://github.com/huggingface/transformers/issues/1133 + sub_texts = [] + current_sub_text = [] + for token in filtered_tokens: + if skip_special_tokens and token in self.all_special_ids: + continue + if token in self.added_tokens_encoder: + if current_sub_text: + sub_texts.append(self.convert_tokens_to_string(current_sub_text)) + current_sub_text = [] + sub_texts.append(token) + else: + current_sub_text.append(token) + if current_sub_text: + sub_texts.append(self.convert_tokens_to_string(current_sub_text)) + + text = "".join(sub_texts) + + if normalize: + clean_text = self._normalize(text) + return clean_text + else: + return text + + # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.convert_tokens_to_string with GPT2 -> Whisper + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + text = "".join(tokens) + text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors) + return text + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + if not os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + merge_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] + ) + normalizer_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["normalizer_file"] + ) + + with open(vocab_file, "w", encoding="utf-8") as f: + f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") + + index = 0 + with open(merge_file, "w", encoding="utf-8") as writer: + writer.write("#version: 0.2\n") + for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): + if index != token_index: + logger.warning( + f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." + " Please check that the tokenizer is not corrupted!" + ) + index = token_index + writer.write(" ".join(bpe_tokens) + "\n") + index += 1 + + if self.english_spelling_normalizer is not None: + with open(normalizer_file, "w", encoding="utf-8") as f: + f.write( + json.dumps(self.english_spelling_normalizer, indent=2, sort_keys=True, ensure_ascii=False) + "\n" + ) + + return vocab_file, merge_file, normalizer_file + + # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.prepare_for_tokenization with GPT2 -> Whisper + def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): + add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space) + if is_split_into_words or add_prefix_space: + text = " " + text + return (text, kwargs) + + # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._build_conversation_input_ids with GPT2 -> Whisper + def _build_conversation_input_ids(self, conversation) -> List[int]: + input_ids = [] + for is_user, text in conversation.iter_texts(): + input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id]) + if len(input_ids) > self.model_max_length: + input_ids = input_ids[-self.model_max_length :] + return input_ids diff --git a/src/transformers/utils/__init__.py b/src/transformers/utils/__init__.py index 24d32d0a012278..2269f225485820 100644 --- a/src/transformers/utils/__init__.py +++ b/src/transformers/utils/__init__.py @@ -100,6 +100,7 @@ is_ipex_available, is_jumanpp_available, is_librosa_available, + is_more_itertools_available, is_ninja_available, is_onnx_available, is_pandas_available, diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index d1addf8d4c1c6d..ef1a6baafabc16 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -5444,6 +5444,30 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class WhisperForConditionalGeneration(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class WhisperModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + +class WhisperPreTrainedModel(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/src/transformers/utils/import_utils.py b/src/transformers/utils/import_utils.py index 2fc52b52a2025f..140cfc78f4edd1 100644 --- a/src/transformers/utils/import_utils.py +++ b/src/transformers/utils/import_utils.py @@ -456,6 +456,10 @@ def is_detectron2_available(): return _detectron2_available +def is_more_itertools_available(): + return importlib.util.find_spec("more_itertools") is not None + + def is_rjieba_available(): return importlib.util.find_spec("rjieba") is not None diff --git a/tests/models/whisper/__init__.py b/tests/models/whisper/__init__.py new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/tests/models/whisper/test_feature_extraction_whisper.py b/tests/models/whisper/test_feature_extraction_whisper.py new file mode 100644 index 00000000000000..c67cab78200164 --- /dev/null +++ b/tests/models/whisper/test_feature_extraction_whisper.py @@ -0,0 +1,225 @@ +# coding=utf-8 +# Copyright 2022 HuggingFace Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import itertools +import os +import random +import tempfile +import unittest + +import numpy as np + +from transformers import is_speech_available +from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio +from transformers.utils.import_utils import is_torch_available + +from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin + + +if is_speech_available(): + from transformers import WhisperFeatureExtractor + +if is_torch_available(): + import torch + +global_rng = random.Random() + + +def floats_list(shape, scale=1.0, rng=None, name=None): + """Creates a random float32 tensor""" + if rng is None: + rng = global_rng + + values = [] + for batch_idx in range(shape[0]): + values.append([]) + for _ in range(shape[1]): + values[-1].append(rng.random() * scale) + + return values + + +@require_torch +@require_torchaudio +class WhisperFeatureExtractionTester(unittest.TestCase): + def __init__( + self, + parent, + batch_size=7, + min_seq_length=400, + max_seq_length=2000, + feature_size=10, + hop_length=160, + chunk_length=8, + padding_value=0.0, + sampling_rate=4_000, + return_attention_mask=True, + do_normalize=True, + ): + self.parent = parent + self.batch_size = batch_size + self.min_seq_length = min_seq_length + self.max_seq_length = max_seq_length + self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) + self.padding_value = padding_value + self.sampling_rate = sampling_rate + self.return_attention_mask = return_attention_mask + self.do_normalize = do_normalize + self.feature_size = feature_size + self.chunk_length = chunk_length + self.hop_length = hop_length + + def prepare_feat_extract_dict(self): + return { + "feature_size": self.feature_size, + "hop_length": self.hop_length, + "chunk_length": self.chunk_length, + "padding_value": self.padding_value, + "sampling_rate": self.sampling_rate, + "return_attention_mask": self.return_attention_mask, + "do_normalize": self.do_normalize, + } + + def prepare_inputs_for_common(self, equal_length=False, numpify=False): + def _flatten(list_of_lists): + return list(itertools.chain(*list_of_lists)) + + if equal_length: + speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)] + else: + # make sure that inputs increase in size + speech_inputs = [ + floats_list((x, self.feature_size)) + for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) + ] + if numpify: + speech_inputs = [np.asarray(x) for x in speech_inputs] + return speech_inputs + + +@require_torch +@require_torchaudio +class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): + + feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None + + def setUp(self): + self.feat_extract_tester = WhisperFeatureExtractionTester(self) + + def test_feat_extract_from_and_save_pretrained(self): + feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) + + with tempfile.TemporaryDirectory() as tmpdirname: + saved_file = feat_extract_first.save_pretrained(tmpdirname)[0] + check_json_file_has_correct_format(saved_file) + feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) + + dict_first = feat_extract_first.to_dict() + dict_second = feat_extract_second.to_dict() + mel_1 = dict_first.pop("mel_filters") + mel_2 = dict_second.pop("mel_filters") + self.assertTrue(np.allclose(mel_1, mel_2)) + self.assertEqual(dict_first, dict_second) + + def test_feat_extract_to_json_file(self): + feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) + + with tempfile.TemporaryDirectory() as tmpdirname: + json_file_path = os.path.join(tmpdirname, "feat_extract.json") + feat_extract_first.to_json_file(json_file_path) + feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) + + dict_first = feat_extract_first.to_dict() + dict_second = feat_extract_second.to_dict() + mel_1 = dict_first.pop("mel_filters") + mel_2 = dict_second.pop("mel_filters") + self.assertTrue(np.allclose(mel_1, mel_2)) + self.assertEqual(dict_first, dict_second) + + def test_call(self): + # Tests that all call wrap to encode_plus and batch_encode_plus + feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) + # create three inputs of length 800, 1000, and 1200 + speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] + np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] + + # Test feature size + input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features + self.assertTrue(input_features.ndim == 3) + self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames) + self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size) + + # Test not batched input + encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features + encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features + self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) + + # Test batched + encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features + encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features + for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): + self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) + + # Test truncation required + speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)] + np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] + + speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs] + np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated] + + encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features + encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features + for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): + self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) + + def test_double_precision_pad(self): + import torch + + feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) + np_speech_inputs = np.random.rand(100, 32).astype(np.float64) + py_speech_inputs = np_speech_inputs.tolist() + + for inputs in [py_speech_inputs, np_speech_inputs]: + np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np") + self.assertTrue(np_processed.input_features.dtype == np.float32) + pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt") + self.assertTrue(pt_processed.input_features.dtype == torch.float32) + + def _load_datasamples(self, num_samples): + from datasets import load_dataset + + ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + # automatic decoding with librispeech + speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] + + return [x["array"] for x in speech_samples] + + def test_integration(self): + # fmt: off + EXPECTED_INPUT_FEATURES = torch.tensor( + [ + 0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951, + 0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678, + 0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554, + -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854 + ] + ) + # fmt: on + + input_speech = self._load_datasamples(1) + feaure_extractor = WhisperFeatureExtractor() + input_features = feaure_extractor(input_speech, return_tensors="pt").input_features + self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4)) diff --git a/tests/models/whisper/test_modeling_whisper.py b/tests/models/whisper/test_modeling_whisper.py new file mode 100644 index 00000000000000..e07d8122a5e5c7 --- /dev/null +++ b/tests/models/whisper/test_modeling_whisper.py @@ -0,0 +1,1042 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Testing suite for the PyTorch Whisper model. """ + +import copy +import inspect +import os +import tempfile +import unittest + +from transformers import WhisperConfig +from transformers.testing_utils import is_torch_available, require_torch, require_torchaudio, slow, torch_device +from transformers.utils import cached_property +from transformers.utils.import_utils import is_datasets_available + +from ...generation.test_generation_utils import GenerationTesterMixin +from ...test_configuration_common import ConfigTester +from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor + + +if is_datasets_available(): + import datasets + from datasets import load_dataset + +if is_torch_available(): + import torch + + from transformers import ( + WhisperFeatureExtractor, + WhisperForConditionalGeneration, + WhisperModel, + WhisperProcessor, + set_seed, + ) + from transformers.models.whisper.modeling_whisper import WhisperDecoder, WhisperEncoder + + +def prepare_whisper_inputs_dict( + config, + input_features, + decoder_input_ids, + attention_mask=None, + decoder_attention_mask=None, + head_mask=None, + decoder_head_mask=None, + cross_attn_head_mask=None, +): + if decoder_attention_mask is None: + decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) + if head_mask is None: + head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) + if decoder_head_mask is None: + decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) + if cross_attn_head_mask is None: + cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) + return { + # "input_ids": input_features, + "input_features": input_features, + "decoder_input_ids": decoder_input_ids, + "decoder_attention_mask": decoder_attention_mask, + "head_mask": head_mask, + "decoder_head_mask": decoder_head_mask, + "cross_attn_head_mask": cross_attn_head_mask, + } + + +@require_torch +class WhisperModelTester: + def __init__( + self, + parent, + batch_size=13, + seq_length=60, + is_training=True, + use_labels=False, + vocab_size=99, + hidden_size=16, + num_hidden_layers=2, + num_attention_heads=4, + input_channels=1, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=20, + max_source_positions=30, + max_target_positions=40, + bos_token_id=98, + eos_token_id=98, + pad_token_id=0, + num_mel_bins=80, + decoder_start_token_id=85, + num_conv_layers=1, + suppress_tokens=None, + begin_suppress_tokens=None, + ): + self.parent = parent + self.batch_size = batch_size + self.seq_length = seq_length + self.is_training = is_training + self.use_labels = use_labels + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.input_channels = input_channels + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.num_mel_bins = num_mel_bins + self.max_position_embeddings = max_position_embeddings + self.max_source_positions = max_source_positions + self.max_target_positions = max_target_positions + self.eos_token_id = eos_token_id + self.pad_token_id = pad_token_id + self.bos_token_id = bos_token_id + self.decoder_start_token_id = decoder_start_token_id + self.num_conv_layers = num_conv_layers + self.suppress_tokens = suppress_tokens + self.begin_suppress_tokens = begin_suppress_tokens + + def prepare_config_and_inputs(self): + input_features = floats_tensor([self.batch_size, self.num_mel_bins, self.seq_length], self.vocab_size) + + decoder_input_ids = torch.tensor(self.batch_size * [[self.decoder_start_token_id]], device=torch_device) + + config = self.get_config() + inputs_dict = prepare_whisper_inputs_dict( + config, + attention_mask=None, + input_features=input_features, + decoder_input_ids=decoder_input_ids, + ) + return config, inputs_dict + + def get_config(self): + return WhisperConfig( + vocab_size=self.vocab_size, + d_model=self.hidden_size, + encoder_layers=self.num_hidden_layers, + decoder_layers=self.num_hidden_layers, + encoder_attention_heads=self.num_attention_heads, + decoder_attention_heads=self.num_attention_heads, + input_channels=self.input_channels, + dropout=self.hidden_dropout_prob, + attention_dropout=self.attention_probs_dropout_prob, + max_position_embeddings=self.max_position_embeddings, + max_source_positions=self.max_source_positions, + max_target_positions=self.max_target_positions, + eos_token_id=self.eos_token_id, + bos_token_id=self.bos_token_id, + pad_token_id=self.pad_token_id, + decoder_ffn_dim=self.hidden_size, + encoder_ffn_dim=self.hidden_size, + decoder_start_token_id=self.decoder_start_token_id, + suppress_tokens=self.suppress_tokens, + begin_suppress_tokens=self.begin_suppress_tokens, + ) + + def prepare_config_and_inputs_for_common(self): + config, inputs_dict = self.prepare_config_and_inputs() + return config, inputs_dict + + def get_subsampled_output_lengths(self, input_lengths): + """ + Computes the output length of the convolutional layers + """ + + for i in range(self.num_conv_layers): + input_lengths = (input_lengths - 1) // 2 + 1 + + return input_lengths + + def create_and_check_model_forward(self, config, inputs_dict): + model = WhisperModel(config=config).to(torch_device).eval() + + input_features = inputs_dict["input_features"] + decoder_input_ids = inputs_dict["decoder_input_ids"] + + # first forward pass + last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state + + self.parent.assertTrue(last_hidden_state.shape, (13, 7, 16)) + + def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): + model = WhisperModel(config=config).get_decoder().to(torch_device).eval() + input_ids = inputs_dict["decoder_input_ids"] + attention_mask = inputs_dict["decoder_attention_mask"] + + # first forward pass + outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) + + output, past_key_values = outputs.to_tuple() + + # create hypothetical multiple next token and extent to next_input_ids + next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size).clamp(2) + next_attn_mask = ids_tensor((self.batch_size, 3), 2) + + # append to next input_ids and + next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) + next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) + + output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] + output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ + "last_hidden_state" + ] + + # select random slice + random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() + output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() + output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() + + self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) + + # test that outputs are equal for slice + self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2)) + + def check_encoder_decoder_model_standalone(self, config, inputs_dict): + model = WhisperModel(config=config).to(torch_device).eval() + outputs = model(**inputs_dict) + + encoder_last_hidden_state = outputs.encoder_last_hidden_state + last_hidden_state = outputs.last_hidden_state + + with tempfile.TemporaryDirectory() as tmpdirname: + encoder = model.get_encoder() + encoder.save_pretrained(tmpdirname) + encoder = WhisperEncoder.from_pretrained(tmpdirname).to(torch_device) + + encoder_last_hidden_state_2 = encoder(inputs_dict["input_features"])[0] + + self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) + + with tempfile.TemporaryDirectory() as tmpdirname: + decoder = model.get_decoder() + decoder.save_pretrained(tmpdirname) + decoder = WhisperDecoder.from_pretrained(tmpdirname).to(torch_device) + + last_hidden_state_2 = decoder( + input_ids=inputs_dict["decoder_input_ids"], + attention_mask=inputs_dict["decoder_attention_mask"], + encoder_hidden_states=encoder_last_hidden_state, + )[0] + + self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) + + +@require_torch +class WhisperModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): + all_model_classes = (WhisperModel, WhisperForConditionalGeneration) if is_torch_available() else () + all_generative_model_classes = (WhisperForConditionalGeneration,) if is_torch_available() else () + is_encoder_decoder = True + fx_compatible = False + test_pruning = False + test_missing_keys = False + + input_name = "input_features" + + def setUp(self): + self.model_tester = WhisperModelTester(self) + self.config_tester = ConfigTester(self, config_class=WhisperConfig) + self.maxDiff = 3000 + + def test_config(self): + self.config_tester.run_common_tests() + + def test_save_load_strict(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs() + for model_class in self.all_model_classes: + model = model_class(config) + + with tempfile.TemporaryDirectory() as tmpdirname: + model.save_pretrained(tmpdirname) + model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) + self.assertEqual(info["missing_keys"], []) + + def test_model_forward(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model_forward(*config_and_inputs) + + def test_decoder_model_past_with_large_inputs(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) + + def test_encoder_decoder_model_standalone(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() + self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) + + def _get_input_ids_and_config(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + input_ids = inputs_dict[self.input_name] + + # cut to half length & take max batch_size 3 + max_batch_size = 3 + input_ids = input_ids[:max_batch_size, :, :] + + # generate max 3 tokens + max_length = input_ids.shape[-1] + 3 + if config.eos_token_id is not None and config.pad_token_id is None: + # hack to allow generate for models such as GPT2 as is done in `generate()` + config.pad_token_id = config.eos_token_id + + return config, input_ids, None, max_length + + # not implemented currently + def test_inputs_embeds(self): + pass + + # training is not supported yet + def test_training(self): + pass + + def test_training_gradient_checkpointing(self): + pass + + def test_generate_with_head_masking(self): + pass + + def test_generate_fp16(self): + config, input_dict = self.model_tester.prepare_config_and_inputs() + config.max_target_positions = 400 + input_features = input_dict["input_features"] + model = WhisperForConditionalGeneration(config).eval().to(torch_device) + if torch_device == "cuda": + input_features = input_features.half() + model.half() + model.generate(input_features) + model.generate(input_features, num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) + + def test_forward_signature(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + signature = inspect.signature(model.forward) + # signature.parameters is an OrderedDict => so arg_names order is deterministic + arg_names = [*signature.parameters.keys()] + + expected_arg_names = [ + "input_features", + "decoder_input_ids", + "decoder_attention_mask", + ] + expected_arg_names.extend( + ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] + if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names + else ["encoder_outputs"] + ) + self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) + + def test_hidden_states_output(self): + def check_hidden_states_output(inputs_dict, config, model_class): + model = model_class(config) + model.to(torch_device) + model.eval() + + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states + + expected_num_layers = getattr( + self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 + ) + self.assertEqual(len(hidden_states), expected_num_layers) + + if hasattr(self.model_tester, "encoder_seq_length"): + seq_length = self.model_tester.encoder_seq_length + else: + seq_length = self.model_tester.seq_length + + subsampled_seq_length = model._get_feat_extract_output_lengths(seq_length) + + self.assertListEqual( + list(hidden_states[0].shape[-2:]), + [subsampled_seq_length, self.model_tester.hidden_size], + ) + + if config.is_encoder_decoder: + hidden_states = outputs.decoder_hidden_states + + self.assertIsInstance(hidden_states, (list, tuple)) + self.assertEqual(len(hidden_states), expected_num_layers) + + decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", 1) + + self.assertListEqual( + list(hidden_states[0].shape[-2:]), + [decoder_seq_length, self.model_tester.hidden_size], + ) + + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + inputs_dict["output_hidden_states"] = True + check_hidden_states_output(inputs_dict, config, model_class) + + # check that output_hidden_states also work using config + del inputs_dict["output_hidden_states"] + config.output_hidden_states = True + + check_hidden_states_output(inputs_dict, config, model_class) + + def test_attention_outputs(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + config.return_dict = True + + seq_len = getattr(self.model_tester, "seq_length", None) + decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", 1) + encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) + decoder_key_length = getattr(self.model_tester, "decoder_key_length", 1) + encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) + + for model_class in self.all_model_classes: + inputs_dict["output_attentions"] = True + inputs_dict["output_hidden_states"] = False + config.return_dict = True + model = model_class(config) + model.to(torch_device) + model.eval() + + subsampled_encoder_seq_length = model._get_feat_extract_output_lengths(encoder_seq_length) + subsampled_encoder_key_length = model._get_feat_extract_output_lengths(encoder_key_length) + + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions + self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) + + # check that output_attentions also work using config + del inputs_dict["output_attentions"] + config.output_attentions = True + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions + self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) + + self.assertListEqual( + list(attentions[0].shape[-3:]), + [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], + ) + out_len = len(outputs) + + correct_outlen = 5 + + # loss is at first position + if "labels" in inputs_dict: + correct_outlen += 1 # loss is added to beginning + if "past_key_values" in outputs: + correct_outlen += 1 # past_key_values have been returned + + self.assertEqual(out_len, correct_outlen) + + # decoder attentions + decoder_attentions = outputs.decoder_attentions + self.assertIsInstance(decoder_attentions, (list, tuple)) + self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) + self.assertListEqual( + list(decoder_attentions[0].shape[-3:]), + [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], + ) + + # cross attentions + cross_attentions = outputs.cross_attentions + self.assertIsInstance(cross_attentions, (list, tuple)) + self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) + self.assertListEqual( + list(cross_attentions[0].shape[-3:]), + [ + self.model_tester.num_attention_heads, + decoder_seq_length, + subsampled_encoder_key_length, + ], + ) + + # Check attention is always last and order is fine + inputs_dict["output_attentions"] = True + inputs_dict["output_hidden_states"] = True + model = model_class(config) + model.to(torch_device) + model.eval() + with torch.no_grad(): + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + added_hidden_states = 2 + self.assertEqual(out_len + added_hidden_states, len(outputs)) + + self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions + + self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) + self.assertListEqual( + list(self_attentions[0].shape[-3:]), + [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], + ) + + def test_resize_tokens_embeddings(self): + ( + original_config, + inputs_dict, + ) = self.model_tester.prepare_config_and_inputs_for_common() + if not self.test_resize_embeddings: + return + + for model_class in self.all_model_classes: + config = copy.deepcopy(original_config) + model = model_class(config) + model.to(torch_device) + + if self.model_tester.is_training is False: + model.eval() + + model_vocab_size = config.vocab_size + # Retrieve the embeddings and clone theme + model_embed = model.resize_token_embeddings(model_vocab_size) + cloned_embeddings = model_embed.weight.clone() + + # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size + model_embed = model.resize_token_embeddings(model_vocab_size + 10) + self.assertEqual(model.config.vocab_size, model_vocab_size + 10) + # Check that it actually resizes the embeddings matrix + self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) + # Check that the model can still do a forward pass successfully (every parameter should be resized) + model(**self._prepare_for_class(inputs_dict, model_class)) + + # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size + model_embed = model.resize_token_embeddings(model_vocab_size - 15) + self.assertEqual(model.config.vocab_size, model_vocab_size - 15) + # Check that it actually resizes the embeddings matrix + self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) + + # make sure that decoder_input_ids are resized + if "decoder_input_ids" in inputs_dict: + inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) + model(**self._prepare_for_class(inputs_dict, model_class)) + + # Check that adding and removing tokens has not modified the first part of the embedding matrix. + models_equal = True + for p1, p2 in zip(cloned_embeddings, model_embed.weight): + if p1.data.ne(p2.data).sum() > 0: + models_equal = False + + self.assertTrue(models_equal) + + def test_resize_embeddings_untied(self): + ( + original_config, + inputs_dict, + ) = self.model_tester.prepare_config_and_inputs_for_common() + if not self.test_resize_embeddings: + return + + original_config.tie_word_embeddings = False + + # if model cannot untied embeddings -> leave test + if original_config.tie_word_embeddings: + return + + for model_class in self.all_model_classes: + config = copy.deepcopy(original_config) + model = model_class(config).to(torch_device) + + # if no output embeddings -> leave test + if model.get_output_embeddings() is None: + continue + + # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size + model_vocab_size = config.vocab_size + model.resize_token_embeddings(model_vocab_size + 10) + self.assertEqual(model.config.vocab_size, model_vocab_size + 10) + output_embeds = model.get_output_embeddings() + self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) + # Check bias if present + if output_embeds.bias is not None: + self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) + # Check that the model can still do a forward pass successfully (every parameter should be resized) + model(**self._prepare_for_class(inputs_dict, model_class)) + + # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size + model.resize_token_embeddings(model_vocab_size - 15) + self.assertEqual(model.config.vocab_size, model_vocab_size - 15) + # Check that it actually resizes the embeddings matrix + output_embeds = model.get_output_embeddings() + self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) + # Check bias if present + if output_embeds.bias is not None: + self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) + # Check that the model can still do a forward pass successfully (every parameter should be resized) + if "decoder_input_ids" in inputs_dict: + inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) + # Check that the model can still do a forward pass successfully (every parameter should be resized) + model(**self._prepare_for_class(inputs_dict, model_class)) + + def test_generate_without_input_ids(self): + pass + + @staticmethod + def _get_encoder_outputs( + model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1 + ): + encoder = model.get_encoder() + encoder_outputs = encoder( + input_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave( + num_interleave, dim=0 + ) + input_ids = input_ids[:, :, 0] + input_ids = torch.zeros_like(input_ids[:, :1], dtype=torch.long) + torch.tensor( + [model._get_decoder_start_token_id()], device=input_ids.device + ) + attention_mask = None + return encoder_outputs, input_ids, attention_mask + + def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1): + batch_size, mel, seq_length = input_ids.shape + subsampled_seq_length = self.model_tester.get_subsampled_output_lengths(seq_length) + num_sequences_in_output = batch_size * num_return_sequences + gen_len = ( + output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length + ) + + # scores + self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config) + + # Attentions + # encoder + self._check_encoder_attention_for_generate( + output.encoder_attentions, batch_size, config, subsampled_seq_length + ) + # decoder + self._check_attentions_for_generate( + num_sequences_in_output, + output.decoder_attentions, + min_length=1, + max_length=output.sequences.shape[-1], + config=config, + use_cache=use_cache, + ) + + # Hidden States + # encoder + self._check_encoder_hidden_states_for_generate( + output.encoder_hidden_states, batch_size, config, subsampled_seq_length + ) + + # decoder + self._check_hidden_states_for_generate( + num_sequences_in_output, + output.decoder_hidden_states, + min_length=1, + max_length=output.sequences.shape[-1], + config=config, + use_cache=use_cache, + ) + + def _create_and_check_torchscript(self, config, inputs_dict): + if not self.test_torchscript: + return + + configs_no_init = _config_zero_init(config) # To be sure we have no Nan + configs_no_init.torchscript = True + for model_class in self.all_model_classes: + model = model_class(config=configs_no_init) + model.to(torch_device) + model.eval() + inputs = self._prepare_for_class(inputs_dict, model_class) + + try: + model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward + input_features = inputs["input_features"] + decoder_input_ids = inputs["decoder_input_ids"] + decoder_attention_mask = inputs["decoder_attention_mask"] + traced_model = torch.jit.trace(model, (input_features, decoder_input_ids, decoder_attention_mask)) + except RuntimeError: + self.fail("Couldn't trace module.") + + with tempfile.TemporaryDirectory() as tmp_dir_name: + pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") + + try: + torch.jit.save(traced_model, pt_file_name) + except Exception: + self.fail("Couldn't save module.") + + try: + loaded_model = torch.jit.load(pt_file_name) + except Exception: + self.fail("Couldn't load module.") + + model.to(torch_device) + model.eval() + + loaded_model.to(torch_device) + loaded_model.eval() + + model_state_dict = model.state_dict() + loaded_model_state_dict = loaded_model.state_dict() + + self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) + + models_equal = True + for layer_name, p1 in model_state_dict.items(): + p2 = loaded_model_state_dict[layer_name] + if p1.data.ne(p2.data).sum() > 0: + models_equal = False + + self.assertTrue(models_equal) + + +@require_torch +@require_torchaudio +class WhisperModelIntegrationTests(unittest.TestCase): + @cached_property + def default_processor(self): + return WhisperProcessor.from_pretrained("openai/whisper-base") + + def _load_datasamples(self, num_samples): + + ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + # automatic decoding with librispeech + speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] + + return [x["array"] for x in speech_samples] + + @slow + def test_tiny_logits_librispeech(self): + torch_device = "cpu" + set_seed(0) + model = WhisperModel.from_pretrained("openai/whisper-tiny") + model.to(torch_device) + input_speech = self._load_datasamples(1) + feature_extractor = WhisperFeatureExtractor() + input_features = feature_extractor(input_speech, return_tensors="pt").input_features + + with torch.no_grad(): + logits = model( + input_features, + decoder_input_ids=torch.tensor([[50258, 50259, 50359]]), + output_hidden_states=False, + output_attentions=False, + return_dict=False, + use_cache=False, + ) + + # fmt: off + EXPECTED_LOGITS = torch.tensor( + [ + 2.9892, -6.7607, 5.7348, 3.6096, 0.2152, -5.7321, 4.8855, -1.6407, + 0.2823, -1.5718, 10.4269, 3.4427, 0.0219, -8.0612, 3.4784, 8.4246, + 4.0575, -2.2864, 11.1084, 0.9963, 0.9884, -8.5154, -3.5469, -9.3713, + 0.9786, 3.5435, 7.4850, -5.2579, -1.4366, 10.4841 + ] + ) + # fmt: on + self.assertTrue(torch.allclose(logits[0][0, 0, :30].cpu(), EXPECTED_LOGITS, atol=1e-4)) + + # fmt: off + EXPECTED_GENERATION = torch.tensor( + [ + -1.4651, -2.6944, 2.7821, 2.3793, 4.0738, 0.0188, -3.3203, 1.9836, + 0.0520, 0.7095, 1.1063, 0.2952, -3.6786, -0.5249, 0.3105, 4.7691, + 1.1562, 1.3046, 0.5810, -0.3624, 1.7006, 1.3424, 0.9817, 2.1958, + 1.8775, -5.7046, -0.7679, 4.0113, 2.6848, 2.8609 + ] + ) + # fmt: on + + head_logits = logits[0] @ model.decoder.embed_tokens.weight.T + self.assertTrue(torch.allclose(head_logits[0, 0, :30].cpu(), EXPECTED_GENERATION, atol=1e-4)) + + @slow + def test_small_en_logits_librispeech(self): + set_seed(0) + torch_device = "cpu" + model = WhisperModel.from_pretrained("openai/whisper-small.en") + model.to(torch_device) + + input_speech = self._load_datasamples(1) + + feaure_extractor = WhisperFeatureExtractor() + input_features = feaure_extractor(input_speech, return_tensors="pt").input_features.to(torch_device) + + logits = model( + input_features, + decoder_input_ids=torch.tensor([[model.config.decoder_start_token_id]]), + output_hidden_states=False, + output_attentions=False, + use_cache=False, + ) + + logits = logits.last_hidden_state @ model.decoder.embed_tokens.weight.T + + # fmt: off + EXPECTED_LOGITS = torch.tensor( + [ + -3.6784, -7.7211, -9.5070, -11.9286, -7.6489, -9.7026, -5.6188, + -8.0104, -4.6238, -5.1833, -9.0485, -3.4079, -5.4874, -2.6935, + -6.3479, -7.3398, -6.9558, -7.6867, -7.4748, -8.3463, -9.9781, + -10.8389, -10.3105, -11.7201, -9.7261, -7.1590, -5.9272, -12.4509, + -11.1146, -8.1918 + ] + ) + # fmt: on + self.assertTrue(torch.allclose(logits[0, 0, :30].cpu(), EXPECTED_LOGITS, atol=1e-4)) + + @slow + def test_large_logits_librispeech(self): + set_seed(0) + + torch_device = "cpu" + model = WhisperModel.from_pretrained("openai/whisper-large") + model.to(torch_device) + + input_speech = self._load_datasamples(1) + + processor = WhisperProcessor.from_pretrained("openai/whisper-large") + processed_inputs = processor(audio=input_speech, text="This part of the speech", return_tensors="pt") + input_features = processed_inputs.input_features.to(torch_device) + labels = processed_inputs.labels.to(torch_device) + + logits = model( + input_features, + decoder_input_ids=labels, + output_hidden_states=False, + output_attentions=False, + use_cache=False, + ) + + logits = logits.last_hidden_state @ model.decoder.embed_tokens.weight.T + + # fmt: off + EXPECTED_LOGITS = torch.tensor( + [ + 2.1382, 0.9381, 4.4671, 3.5589, 2.4022, 3.8576, -0.6521, 2.5472, + 1.8301, 1.9957, 2.3432, 1.4678, 0.5459, 2.2597, 1.5179, 2.5357, + 1.1624, 0.6194, 1.0757, 1.8259, 2.4076, 1.6601, 2.3503, 1.3376, + 1.9891, 1.8635, 3.8931, 5.3699, 4.4772, 3.9184 + ] + ) + # fmt: on + + self.assertTrue(torch.allclose(logits[0, 0, :30].cpu(), EXPECTED_LOGITS, atol=1e-4)) + + @slow + def test_tiny_en_generation(self): + + torch_device = "cpu" + set_seed(0) + processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") + model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") + model.to(torch_device) + model.config.decoder_start_token_id = 50257 + + input_speech = self._load_datasamples(1) + input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( + torch_device + ) + + generated_ids = model.generate(input_features, num_beams=5) + transcript = processor.tokenizer.batch_decode(generated_ids)[0] + + EXPECTED_TRANSCRIPT = ( + "<|startoftranscript|><|notimestamps|> Mr. Quilter is the apostle of the middle" + " classes, and we are glad to" + ) + self.assertEqual(transcript, EXPECTED_TRANSCRIPT) + + @slow + def test_tiny_generation(self): + + torch_device = "cpu" + set_seed(0) + processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") + model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") + model.to(torch_device) + + input_speech = self._load_datasamples(1) + input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( + torch_device + ) + + generated_ids = model.generate(input_features, num_beams=5) + transcript = processor.tokenizer.decode(generated_ids[0]) + + EXPECTED_TRANSCRIPT = ( + "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> Mr. Quilter is the apostle of the middle" + " classes and we are glad" + ) + self.assertEqual(transcript, EXPECTED_TRANSCRIPT) + + @slow + def test_large_generation(self): + torch_device = "cpu" + set_seed(0) + processor = WhisperProcessor.from_pretrained("openai/whisper-large") + model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large") + model.to(torch_device) + + input_speech = self._load_datasamples(1) + input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( + torch_device + ) + + model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="en", task="transcribe") + generated_ids = model.generate( + input_features, + do_sample=False, + ) + transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] + + EXPECTED_TRANSCRIPT = " Mr. Quilter is the apostle of the middle classes and we are glad" + self.assertEqual(transcript, EXPECTED_TRANSCRIPT) + + @slow + def test_large_generation_multilingual(self): + torch_device = "cpu" + set_seed(0) + processor = WhisperProcessor.from_pretrained("openai/whisper-large") + model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large") + model.to(torch_device) + + ds = load_dataset("common_voice", "ja", split="test", streaming=True) + ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000)) + input_speech = next(iter(ds))["audio"]["array"] + input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( + torch_device + ) + + model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="ja", task="transcribe") + generated_ids = model.generate(input_features, do_sample=False) + transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] + + EXPECTED_TRANSCRIPT = "木村さんに電話を貸してもらいました" + self.assertEqual(transcript, EXPECTED_TRANSCRIPT) + + model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="en", task="transcribe") + generated_ids = model.generate( + input_features, + do_sample=False, + ) + transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] + + EXPECTED_TRANSCRIPT = " Kimura san ni denwa wo kaite moraimashita" + self.assertEqual(transcript, EXPECTED_TRANSCRIPT) + + model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="ja", task="translate") + generated_ids = model.generate(input_features, do_sample=False) + transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] + + EXPECTED_TRANSCRIPT = " I borrowed a phone from Kimura san" + self.assertEqual(transcript, EXPECTED_TRANSCRIPT) + + @slow + def test_large_batched_generation(self): + set_seed(0) + processor = WhisperProcessor.from_pretrained("openai/whisper-large") + model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large") + + input_speech = self._load_datasamples(4) + input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features + generated_ids = model.generate(input_features) + + # fmt: off + EXPECTED_LOGITS = torch.tensor( + [ + [50258, 50358, 50363, 2221, 13, 2326, 388, 391, 307, 264, 50244, 295, 264, 2808, 5359, 293, 321, 366, 5404, 281], + [50258, 50358, 50363, 6966, 307, 2221, 13, 2326, 388, 391, 311, 9060, 1570, 1880, 813, 702, 1871, 13, 50257, 50257], + [50258, 50358, 50363, 634, 5112, 505, 300, 412, 341, 42729, 3196, 295, 264, 1064, 11, 365, 5272, 293, 12904, 9256], + [50258, 50358, 50363, 634, 575, 12525, 22618, 1968, 6144, 35617, 20084, 1756, 311, 589, 307, 534, 10281, 934, 439, 11] + ] + ) + # fmt: on + + self.assertTrue(torch.allclose(generated_ids, EXPECTED_LOGITS)) + + # fmt: off + EXPECTED_TRANSCRIPT = [ + ' Mr. Quilter is the apostle of the middle classes, and we are glad to', + " Nor is Mr. Quilter's manner less interesting than his matter.", + " He tells us that at this festive season of the year, with Christmas and roast beef", + " He has grave doubts whether Sir Frederick Layton's work is really Greek after all," + ] + # fmt: on + + transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) + self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) + + @slow + def test_tiny_en_batched_generation(self): + torch_device = "cuda" + set_seed(0) + processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") + model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") + model.to(torch_device) + + input_speech = self._load_datasamples(4) + input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( + torch_device + ) + generated_ids = model.generate(input_features).to("cpu") + + # fmt: off + EXPECTED_LOGITS = torch.tensor( + [ + [50257, 50362, 1770, 13, 2264, 346, 353, 318, 262, 46329, 286, 262, 3504, 6097, 11, 290, 356, 389, 9675, 284], + [50257, 50362, 5414, 318, 1770, 13, 2264, 346, 353, 338, 5642, 1342, 3499, 621, 465, 2300, 13, 50256, 50256, 50256], + [50257, 50362, 679, 4952, 514, 326, 379, 428, 43856, 1622, 286, 262, 614, 11, 351, 6786, 290, 32595, 12023, 28236], + [50257, 50362, 679, 468, 12296, 17188, 1771, 7361, 26113, 18881, 1122, 338, 670, 318, 1107, 8312, 706, 477, 290, 460] + ] + + ) + # fmt: on + + self.assertTrue(torch.allclose(generated_ids, EXPECTED_LOGITS)) + + # fmt: off + EXPECTED_TRANSCRIPT = [ + " Mr. Quilter is the apostle of the middle classes, and we are glad to", + " Nor is Mr. Quilter's manner less interesting than his matter.", + " He tells us that at this festive season of the year, with Christmas and roast beef looming", + " He has grave doubts whether Sir Frederick Layton's work is really Greek after all and can", + ] + # fmt: on + + transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) + self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) diff --git a/tests/models/whisper/test_processor_whisper.py b/tests/models/whisper/test_processor_whisper.py new file mode 100644 index 00000000000000..00a5995f003dab --- /dev/null +++ b/tests/models/whisper/test_processor_whisper.py @@ -0,0 +1,118 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import shutil +import tempfile +import unittest + +from transformers import WhisperTokenizer, is_speech_available +from transformers.testing_utils import require_sentencepiece, require_torch, require_torchaudio + +from .test_feature_extraction_whisper import floats_list + + +if is_speech_available(): + from transformers import WhisperFeatureExtractor, WhisperProcessor + + +@require_torch +@require_torchaudio +@require_sentencepiece +class WhisperProcessorTest(unittest.TestCase): + def setUp(self): + self.checkpoint = "openai/whisper-small.en" + self.tmpdirname = tempfile.mkdtemp() + + def get_tokenizer(self, **kwargs): + return WhisperTokenizer.from_pretrained(self.checkpoint, **kwargs) + + def get_feature_extractor(self, **kwargs): + return WhisperFeatureExtractor.from_pretrained(self.checkpoint, **kwargs) + + def tearDown(self): + shutil.rmtree(self.tmpdirname) + + def test_save_load_pretrained_default(self): + tokenizer = self.get_tokenizer() + feature_extractor = self.get_feature_extractor() + + processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) + + processor.save_pretrained(self.tmpdirname) + processor = WhisperProcessor.from_pretrained(self.tmpdirname) + + self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) + self.assertIsInstance(processor.tokenizer, WhisperTokenizer) + + self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string()) + self.assertIsInstance(processor.feature_extractor, WhisperFeatureExtractor) + + def test_save_load_pretrained_additional_features(self): + processor = WhisperProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor()) + processor.save_pretrained(self.tmpdirname) + + tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") + feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0) + + processor = WhisperProcessor.from_pretrained( + self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 + ) + + self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) + self.assertIsInstance(processor.tokenizer, WhisperTokenizer) + + self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) + self.assertIsInstance(processor.feature_extractor, WhisperFeatureExtractor) + + def test_feature_extractor(self): + feature_extractor = self.get_feature_extractor() + tokenizer = self.get_tokenizer() + + processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) + + raw_speech = floats_list((3, 1000)) + + input_feat_extract = feature_extractor(raw_speech, return_tensors="np") + input_processor = processor(raw_speech, return_tensors="np") + + for key in input_feat_extract.keys(): + self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) + + def test_tokenizer(self): + feature_extractor = self.get_feature_extractor() + tokenizer = self.get_tokenizer() + + processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) + + input_str = "This is a test string" + + encoded_processor = processor(text=input_str) + + encoded_tok = tokenizer(input_str) + + for key in encoded_tok.keys(): + self.assertListEqual(encoded_tok[key], encoded_processor[key]) + + def test_tokenizer_decode(self): + feature_extractor = self.get_feature_extractor() + tokenizer = self.get_tokenizer() + + processor = WhisperProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor) + + predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] + + decoded_processor = processor.batch_decode(predicted_ids) + decoded_tok = tokenizer.batch_decode(predicted_ids) + + self.assertListEqual(decoded_tok, decoded_processor) diff --git a/tests/models/whisper/test_tokenization_whisper.py b/tests/models/whisper/test_tokenization_whisper.py new file mode 100644 index 00000000000000..4dc66a499186a4 --- /dev/null +++ b/tests/models/whisper/test_tokenization_whisper.py @@ -0,0 +1,190 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +from transformers.models.whisper import WhisperTokenizer +from transformers.testing_utils import slow + +from ...test_tokenization_common import TokenizerTesterMixin + + +EN_CODE = 50258 +ES_CODE = 50256 + + +class WhisperTokenizerTest(TokenizerTesterMixin, unittest.TestCase): + tokenizer_class = WhisperTokenizer + test_rust_tokenizer = False + test_sentencepiece = False + + def setUp(self): + super().setUp() + tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-tiny") + tokenizer.pad_token_id = 50256 + tokenizer.pad_token = "<|endoftext|>" + tokenizer.save_pretrained(self.tmpdirname) + + def test_convert_token_and_id(self): + """Test ``_convert_token_to_id`` and ``_convert_id_to_token``.""" + token = "Where" + token_id = 14436 + + self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) + self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) + + def test_get_vocab(self): + vocab_keys = list(self.get_tokenizer().get_vocab().keys()) + + self.assertEqual(vocab_keys[0], "!") + self.assertEqual(vocab_keys[1], '"') + self.assertEqual(vocab_keys[-1], "<|notimestamps|>") + self.assertEqual(len(vocab_keys), 50364) + + def test_vocab_size(self): + self.assertEqual(self.get_tokenizer().vocab_size, 50257) + + def test_full_tokenizer(self): + tokenizer = WhisperTokenizer.from_pretrained(self.tmpdirname) + + tokens = tokenizer.tokenize("This is a test") + self.assertListEqual(tokens, ["This", "Ġis", "Ġa", "Ġ", "test"]) + + self.assertListEqual( + tokenizer.convert_tokens_to_ids(tokens), + [5723, 307, 257, 220, 31636], + ) + + tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") + self.assertListEqual( + tokens, + # fmt: off + ['I', 'Ġwas', 'Ġborn', 'Ġin', 'Ġ9', '2000', ',', 'Ġand', 'Ġ', 'this', 'Ġis', 'Ġfals', 'é', '.'], + # fmt: on + ) + ids = tokenizer.convert_tokens_to_ids(tokens) + self.assertListEqual(ids, [40, 390, 4232, 294, 1722, 25743, 11, 293, 220, 11176, 307, 16720, 526, 13]) + + back_tokens = tokenizer.convert_ids_to_tokens(ids) + self.assertListEqual( + back_tokens, + # fmt: off + ['I', 'Ġwas', 'Ġborn', 'Ġin', 'Ġ9', '2000', ',', 'Ġand', 'Ġ', 'this', 'Ġis', 'Ġfals', 'é', '.'], + # fmt: on + ) + + def test_tokenizer_slow_store_full_signature(self): + pass + + @slow + def test_tokenizer_integration(self): + # fmt: off + expected_encoding = {'input_ids': [[41762, 364, 357, 36234, 1900, 355, 12972, 13165, 354, 12, 35636, 364, 290, 12972, 13165, 354, 12, 5310, 13363, 12, 4835, 8, 3769, 2276, 12, 29983, 45619, 357, 13246, 51, 11, 402, 11571, 12, 17, 11, 5564, 13246, 38586, 11, 16276, 44, 11, 4307, 346, 33, 861, 11, 16276, 7934, 23029, 329, 12068, 15417, 28491, 357, 32572, 52, 8, 290, 12068, 15417, 16588, 357, 32572, 38, 8, 351, 625, 3933, 10, 2181, 13363, 4981, 287, 1802, 10, 8950, 290, 2769, 48817, 1799, 1022, 449, 897, 11, 9485, 15884, 354, 290, 309, 22854, 37535, 13], [13246, 51, 318, 3562, 284, 662, 12, 27432, 2769, 8406, 4154, 282, 24612, 422, 9642, 9608, 276, 2420, 416, 26913, 21143, 319, 1111, 1364, 290, 826, 4732, 287, 477, 11685, 13], [464, 2068, 7586, 21831, 18045, 625, 262, 16931, 3290, 13]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 + # fmt: on + + self.tokenizer_integration_test_util( + expected_encoding=expected_encoding, model_name="openai/whisper-tiny.en", padding=False + ) + + +class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase): + checkpoint_name = "openai/whisper-small.en" + + transcript = ( + "'<|startoftranscript|> <|en|> <|transcribe|> <|notimestamps|> Nor is Mr. Quilters manner less interesting" + " than his matter.<|endoftext|>'" + ) + clean_transcript = " Nor is Mr. Quilters manner less interesting than his matter." + french_text = "Bonjour! Il me semble que Mrs Quilters n'était pas présente" + + @classmethod + def setUpClass(cls): + cls.tokenizer: WhisperTokenizer = WhisperTokenizer.from_pretrained(cls.checkpoint_name) + return cls + + def test_tokenizer_equivalence(self): + text = "다람쥐 헌 쳇바퀴에 타고파" + multilingual_tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-tiny", language="ko") + gpt2_tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-tiny.en") + + gpt2_tokens = gpt2_tokenizer.encode(text) + multilingual_tokens = multilingual_tokenizer.encode(text) + + assert gpt2_tokenizer.decode(gpt2_tokens) == text + assert multilingual_tokenizer.decode(multilingual_tokens) == text + assert len(gpt2_tokens) > len(multilingual_tokens) + + # fmt: off + EXPECTED_ENG = [ + 46695, 97, 167, 252, 234, 168, 98, 238, 220, 169, + 245, 234, 23821, 111, 229, 167, 108, 242, 169, 222, + 112, 168, 245, 238, 220, 169, 225, 222, 166, 111, + 254, 169, 234, 234 + ] + EXPECTED_MULTI = [ + 9835, 22855, 168, 98, 238, 13431, 234, 43517, 229, 47053, + 169, 222, 19086, 19840, 1313, 17974 + ] + # fmt: on + + self.assertListEqual(gpt2_tokens, EXPECTED_ENG) + self.assertListEqual(multilingual_tokens, EXPECTED_MULTI) + + def test_tokenizer_special(self): + multilingual_tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-tiny.en") + text = "<|startoftranscript|>Hey! How are you feeling? J'ai l'impression que 郷さん est prêt<|endoftext|>" + + multilingual_tokens = multilingual_tokenizer.encode(text) + + # fmt: off + EXPECTED_MULTI = [ + 50257, 10814, 0, 1374, 389, 345, 4203, 30, 449, 6, + 1872, 300, 6, 11011, 2234, 8358, 16268, 225, 115, 43357, + 22174, 1556, 778, 25792, 83, 50256 + ] + # fmt: on + + self.assertListEqual(multilingual_tokens, EXPECTED_MULTI) + + self.assertEqual(text, multilingual_tokenizer.decode(multilingual_tokens)) + + transcript = multilingual_tokenizer.decode(multilingual_tokens, skip_special_tokens=True) + + EXPECTED_JAP = "Hey! How are you feeling? J'ai l'impression que 郷さん est prêt" + self.assertEqual(transcript, EXPECTED_JAP) + + def test_vocab_size(self): + self.assertEqual(self.tokenizer.vocab_size, 50257) + + def test_tokenizer_decode_ignores_language_codes(self): + self.assertIn(ES_CODE, self.tokenizer.all_special_ids) + generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2] + result = self.tokenizer.decode(generated_ids, skip_special_tokens=True) + expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True) + self.assertEqual(result, expected_spanish) + self.assertNotIn(self.tokenizer.eos_token, result) + + def test_batch_encoding(self): + multilingual_tokenizer = WhisperTokenizer.from_pretrained("openai/whisper-tiny.en") + batch = ["<|en|><|notimestamps|>", "<|en|><|notimestamps|>I am sure that"] + batch_output = multilingual_tokenizer.batch_encode_plus(batch, padding=True).input_ids + + # fmt: off + EXPECTED_MULTI = [ + [50258, 50362, 50256, 50256, 50256, 50256], + [50258, 50362, 40, 716, 1654, 326] + ] + # fmt: on + + self.assertListEqual(batch_output, EXPECTED_MULTI) diff --git a/tests/test_configuration_common.py b/tests/test_configuration_common.py index c2d48ef6625410..df05d2a4ac84fd 100644 --- a/tests/test_configuration_common.py +++ b/tests/test_configuration_common.py @@ -84,6 +84,8 @@ "sep_token_id": 9, "decoder_start_token_id": 10, "exponential_decay_length_penalty": (5, 1.01), + "suppress_tokens": [0, 1], + "begin_suppress_tokens": 2, "task_specific_params": {"translation": "some_params"}, "problem_type": "regression", } diff --git a/utils/check_repo.py b/utils/check_repo.py index fdfcb292e43428..a8c6e888958244 100644 --- a/utils/check_repo.py +++ b/utils/check_repo.py @@ -51,6 +51,8 @@ "DeformableDetrEncoder", # Building part of bigger (tested) model. "DeformableDetrDecoder", # Building part of bigger (tested) model. "OPTDecoder", # Building part of bigger (tested) model. + "WhisperDecoder", # Building part of bigger (tested) model. + "WhisperEncoder", # Building part of bigger (tested) model. "DecisionTransformerGPT2Model", # Building part of bigger (tested) model. "SegformerDecodeHead", # Building part of bigger (tested) model. "PLBartEncoder", # Building part of bigger (tested) model. diff --git a/utils/documentation_tests.txt b/utils/documentation_tests.txt index 06b042c1fc0fb1..7f442c3fdb88dc 100644 --- a/utils/documentation_tests.txt +++ b/utils/documentation_tests.txt @@ -96,4 +96,5 @@ src/transformers/models/wav2vec2/tokenization_wav2vec2.py src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py src/transformers/models/wavlm/modeling_wavlm.py +src/transformers/models/whisper/modeling_whisper.py src/transformers/models/yolos/modeling_yolos.py From bad353cebfb853212b124acc94a8fbf72f63cc9f Mon Sep 17 00:00:00 2001 From: Alara Dirik <8944735+alaradirik@users.noreply.github.com> Date: Thu, 6 Oct 2022 00:16:36 +0300 Subject: [PATCH 477/539] Fix DETR segmentation postprocessing output (#19363) Ensures post_process_instance_segmentation and post_process_panoptic_segmentation methods return a tensor of shape (target_height, target_width) filled with -1 values if no segment with score > threshold is found. --- .../models/detr/feature_extraction_detr.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/transformers/models/detr/feature_extraction_detr.py b/src/transformers/models/detr/feature_extraction_detr.py index 04fb123cf603d9..f4643bf426df86 100644 --- a/src/transformers/models/detr/feature_extraction_detr.py +++ b/src/transformers/models/detr/feature_extraction_detr.py @@ -1261,8 +1261,9 @@ def post_process_instance_segmentation( # No mask found if mask_probs_item.shape[0] <= 0: - segmentation = None - segments: List[Dict] = [] + height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] + segmentation = torch.zeros((height, width)) - 1 + results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item @@ -1347,8 +1348,9 @@ def post_process_panoptic_segmentation( # No mask found if mask_probs_item.shape[0] <= 0: - segmentation = None - segments: List[Dict] = [] + height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] + segmentation = torch.zeros((height, width)) - 1 + results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item From 7e7f62bfa72ca03e9f16285dad182f7c57cd8cab Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Wed, 5 Oct 2022 17:48:14 -0400 Subject: [PATCH 478/539] Fix pipeline tests for Roberta-like tokenizers (#19365) * Fix pipeline tests for Roberta-like tokenizers * Fix fix --- tests/pipelines/test_pipelines_common.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py index ea32f5cac4d467..34684186b54d4e 100644 --- a/tests/pipelines/test_pipelines_common.py +++ b/tests/pipelines/test_pipelines_common.py @@ -37,8 +37,6 @@ AutoModelForSequenceClassification, AutoTokenizer, DistilBertForSequenceClassification, - IBertConfig, - RobertaConfig, TextClassificationPipeline, TFAutoModelForSequenceClassification, pipeline, @@ -71,6 +69,16 @@ logger = logging.getLogger(__name__) +ROBERTA_EMBEDDING_ADJUSMENT_CONFIGS = [ + "CamembertConfig", + "IBertConfig", + "LongformerConfig", + "MarkupLMConfig", + "RobertaConfig", + "XLMRobertaConfig", +] + + def get_checkpoint_from_architecture(architecture): try: module = importlib.import_module(architecture.__module__) @@ -194,7 +202,7 @@ def test(self): try: tokenizer = get_tiny_tokenizer_from_checkpoint(checkpoint) # XLNet actually defines it as -1. - if isinstance(model.config, (RobertaConfig, IBertConfig)): + if model.config.__class__.__name__ in ROBERTA_EMBEDDING_ADJUSMENT_CONFIGS: tokenizer.model_max_length = model.config.max_position_embeddings - 2 elif ( hasattr(model.config, "max_position_embeddings") From f0b490151e4a851c0821e1f215bb7a26565d24f7 Mon Sep 17 00:00:00 2001 From: Alara Dirik <8944735+alaradirik@users.noreply.github.com> Date: Thu, 6 Oct 2022 12:04:01 +0300 Subject: [PATCH 479/539] =?UTF-8?q?=F0=9F=9A=A8=20=F0=9F=9A=A8=20?= =?UTF-8?q?=F0=9F=9A=A8=20Fix=20ViT=20parameter=20initialization=20(#19341?= =?UTF-8?q?)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR aims to rectify the discrepancy between the training performances of HF and Timm ViT implementations. - Initializes torch and flax ViT dense layer weights with trunc_normal instead of normal (consistent with the TF implementation. - Initializes cls_token and positional_embeddings with trunc_normal - Updates DeiT copy to reflect the changes --- src/transformers/models/deit/modeling_deit.py | 4 +- .../models/vit/modeling_flax_vit.py | 46 ++++++++++++++----- .../models/vit/modeling_tf_vit.py | 7 ++- src/transformers/models/vit/modeling_vit.py | 14 ++++-- .../models/vit_mae/modeling_vit_mae.py | 1 - 5 files changed, 50 insertions(+), 22 deletions(-) diff --git a/src/transformers/models/deit/modeling_deit.py b/src/transformers/models/deit/modeling_deit.py index 44110f5e443557..c92d656c0948b8 100644 --- a/src/transformers/models/deit/modeling_deit.py +++ b/src/transformers/models/deit/modeling_deit.py @@ -402,9 +402,7 @@ class DeiTPreTrainedModel(PreTrainedModel): def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): - # Slightly different from the TF version which uses truncated_normal for initialization - # cf https://github.com/pytorch/pytorch/pull/5617 - module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + module.weight.data = nn.init.trunc_normal_(module.weight.data, mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): diff --git a/src/transformers/models/vit/modeling_flax_vit.py b/src/transformers/models/vit/modeling_flax_vit.py index 7a438abb032938..5d0527f5a78a31 100644 --- a/src/transformers/models/vit/modeling_flax_vit.py +++ b/src/transformers/models/vit/modeling_flax_vit.py @@ -101,7 +101,9 @@ def setup(self): strides=(patch_size, patch_size), padding="VALID", dtype=self.dtype, - kernel_init=jax.nn.initializers.normal(self.config.initializer_range), + kernel_init=jax.nn.initializers.variance_scaling( + self.config.initializer_range**2, "fan_in", "truncated_normal" + ), ) def __call__(self, pixel_values): @@ -122,11 +124,17 @@ class FlaxViTEmbeddings(nn.Module): dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): - self.cls_token = self.param("cls_token", nn.initializers.zeros, (1, 1, self.config.hidden_size)) + self.cls_token = self.param( + "cls_token", + jax.nn.initializers.variance_scaling(self.config.initializer_range**2, "fan_in", "truncated_normal"), + (1, 1, self.config.hidden_size), + ) self.patch_embeddings = FlaxViTPatchEmbeddings(self.config, dtype=self.dtype) num_patches = self.patch_embeddings.num_patches self.position_embeddings = self.param( - "position_embeddings", nn.initializers.zeros, (1, num_patches + 1, self.config.hidden_size) + "position_embeddings", + jax.nn.initializers.variance_scaling(self.config.initializer_range**2, "fan_in", "truncated_normal"), + (1, num_patches + 1, self.config.hidden_size), ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) @@ -156,19 +164,25 @@ def setup(self): self.query = nn.Dense( self.config.hidden_size, dtype=self.dtype, - kernel_init=jax.nn.initializers.normal(self.config.initializer_range), + kernel_init=jax.nn.initializers.variance_scaling( + self.config.initializer_range**2, mode="fan_in", distribution="truncated_normal" + ), use_bias=self.config.qkv_bias, ) self.key = nn.Dense( self.config.hidden_size, dtype=self.dtype, - kernel_init=jax.nn.initializers.normal(self.config.initializer_range), + kernel_init=jax.nn.initializers.variance_scaling( + self.config.initializer_range**2, mode="fan_in", distribution="truncated_normal" + ), use_bias=self.config.qkv_bias, ) self.value = nn.Dense( self.config.hidden_size, dtype=self.dtype, - kernel_init=jax.nn.initializers.normal(self.config.initializer_range), + kernel_init=jax.nn.initializers.variance_scaling( + self.config.initializer_range**2, mode="fan_in", distribution="truncated_normal" + ), use_bias=self.config.qkv_bias, ) @@ -214,7 +228,9 @@ class FlaxViTSelfOutput(nn.Module): def setup(self): self.dense = nn.Dense( self.config.hidden_size, - kernel_init=jax.nn.initializers.normal(self.config.initializer_range), + kernel_init=jax.nn.initializers.variance_scaling( + self.config.initializer_range**2, "fan_in", "truncated_normal" + ), dtype=self.dtype, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) @@ -253,7 +269,9 @@ class FlaxViTIntermediate(nn.Module): def setup(self): self.dense = nn.Dense( self.config.intermediate_size, - kernel_init=jax.nn.initializers.normal(self.config.initializer_range), + kernel_init=jax.nn.initializers.variance_scaling( + self.config.initializer_range**2, "fan_in", "truncated_normal" + ), dtype=self.dtype, ) self.activation = ACT2FN[self.config.hidden_act] @@ -271,7 +289,9 @@ class FlaxViTOutput(nn.Module): def setup(self): self.dense = nn.Dense( self.config.hidden_size, - kernel_init=jax.nn.initializers.normal(self.config.initializer_range), + kernel_init=jax.nn.initializers.variance_scaling( + self.config.initializer_range**2, "fan_in", "truncated_normal" + ), dtype=self.dtype, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) @@ -394,7 +414,9 @@ class FlaxViTPooler(nn.Module): def setup(self): self.dense = nn.Dense( self.config.hidden_size, - kernel_init=jax.nn.initializers.normal(self.config.initializer_range), + kernel_init=jax.nn.initializers.variance_scaling( + self.config.initializer_range**2, "fan_in", "truncated_normal" + ), dtype=self.dtype, ) @@ -572,7 +594,9 @@ def setup(self): self.classifier = nn.Dense( self.config.num_labels, dtype=self.dtype, - kernel_init=jax.nn.initializers.normal(self.config.initializer_range), + kernel_init=jax.nn.initializers.variance_scaling( + self.config.initializer_range**2, "fan_in", "truncated_normal" + ), ) def __call__( diff --git a/src/transformers/models/vit/modeling_tf_vit.py b/src/transformers/models/vit/modeling_tf_vit.py index 727cbb75170b1e..8ce5420169c5f0 100644 --- a/src/transformers/models/vit/modeling_tf_vit.py +++ b/src/transformers/models/vit/modeling_tf_vit.py @@ -69,11 +69,14 @@ def build(self, input_shape: tf.TensorShape): num_patches = self.patch_embeddings.num_patches self.cls_token = self.add_weight( - shape=(1, 1, self.config.hidden_size), initializer="zeros", trainable=True, name="cls_token" + shape=(1, 1, self.config.hidden_size), + initializer=get_initializer(self.config.initializer_range), + trainable=True, + name="cls_token", ) self.position_embeddings = self.add_weight( shape=(1, num_patches + 1, self.config.hidden_size), - initializer="zeros", + initializer=get_initializer(self.config.initializer_range), trainable=True, name="position_embeddings", ) diff --git a/src/transformers/models/vit/modeling_vit.py b/src/transformers/models/vit/modeling_vit.py index e6df4baa701950..5c6bbefca4bdd7 100644 --- a/src/transformers/models/vit/modeling_vit.py +++ b/src/transformers/models/vit/modeling_vit.py @@ -67,11 +67,17 @@ class ViTEmbeddings(nn.Module): def __init__(self, config: ViTConfig, use_mask_token: bool = False) -> None: super().__init__() - self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) + self.cls_token = nn.Parameter( + nn.init.trunc_normal_(torch.zeros(1, 1, config.hidden_size), mean=0.0, std=config.initializer_range) + ) self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None self.patch_embeddings = ViTPatchEmbeddings(config) num_patches = self.patch_embeddings.num_patches - self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size)) + self.position_embeddings = nn.Parameter( + nn.init.trunc_normal_( + torch.zeros(1, num_patches + 1, config.hidden_size), mean=0.0, std=config.initializer_range + ) + ) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.config = config @@ -440,9 +446,7 @@ class ViTPreTrainedModel(PreTrainedModel): def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): - # Slightly different from the TF version which uses truncated_normal for initialization - # cf https://github.com/pytorch/pytorch/pull/5617 - module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + module.weight.data = nn.init.trunc_normal_(module.weight.data, mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): diff --git a/src/transformers/models/vit_mae/modeling_vit_mae.py b/src/transformers/models/vit_mae/modeling_vit_mae.py index b612d2f67b091d..d55ef54473b534 100755 --- a/src/transformers/models/vit_mae/modeling_vit_mae.py +++ b/src/transformers/models/vit_mae/modeling_vit_mae.py @@ -581,7 +581,6 @@ class ViTMAEPreTrainedModel(PreTrainedModel): main_input_name = "pixel_values" supports_gradient_checkpointing = True - # Copied from transformers.models.vit.modeling_vit.ViTPreTrainedModel._init_weights def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Conv2d)): From ce2620194b4a8f070cd29504d34a79758affdf95 Mon Sep 17 00:00:00 2001 From: Ilaygoldman <29836366+Ilaygoldman@users.noreply.github.com> Date: Fri, 7 Oct 2022 00:06:39 +0300 Subject: [PATCH 480/539] Change link of repojacking vulnerable link (#19393) The link to https://github.com/vasudevgupta7/bigbird is vulnerable to repojacking (it redirects to the orignial project that changed name), you should change the link to the current name of the project. if you won't change the link, an attacker can open the linked repository and attacks users that trust your links --- examples/research_projects/jax-projects/big_bird/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/research_projects/jax-projects/big_bird/README.md b/examples/research_projects/jax-projects/big_bird/README.md index 36e2f52a796bc1..e8ef274bbe07cd 100644 --- a/examples/research_projects/jax-projects/big_bird/README.md +++ b/examples/research_projects/jax-projects/big_bird/README.md @@ -1,5 +1,5 @@ -Author: [@vasudevgupta7](https://github.com/vasudevgupta7) +Author: [@vasudevgupta7](https://github.com/thevasudevgupta/) ## Intro @@ -57,4 +57,4 @@ wget https://huggingface.co/datasets/vasudevgupta/natural-questions-validation/r python3 evaluate.py ``` -You can find our checkpoint on HuggingFace Hub ([see this](https://huggingface.co/vasudevgupta/flax-bigbird-natural-questions)). In case you are interested in PyTorch BigBird fine-tuning, you can refer to [this repositary](https://github.com/vasudevgupta7/bigbird). +You can find our checkpoint on HuggingFace Hub ([see this](https://huggingface.co/vasudevgupta/flax-bigbird-natural-questions)). In case you are interested in PyTorch BigBird fine-tuning, you can refer to [this repositary](https://github.com/thevasudevgupta/bigbird). From ae3e3bc60a5f0834d952dfead4b28b1ce506125d Mon Sep 17 00:00:00 2001 From: Alara Dirik <8944735+alaradirik@users.noreply.github.com> Date: Fri, 7 Oct 2022 01:02:26 +0300 Subject: [PATCH 481/539] fix docs example, add object_detection to DETR docs (#19377) --- docs/source/en/model_doc/detr.mdx | 1 + src/transformers/models/detr/modeling_detr.py | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/source/en/model_doc/detr.mdx b/docs/source/en/model_doc/detr.mdx index a6025580a6cb60..e2e7dbf5ff85c5 100644 --- a/docs/source/en/model_doc/detr.mdx +++ b/docs/source/en/model_doc/detr.mdx @@ -171,6 +171,7 @@ mean Average Precision (mAP) and Panoptic Quality (PQ). The latter objects are i [[autodoc]] DetrFeatureExtractor - __call__ - pad_and_create_pixel_mask + - post_process_object_detection - post_process_semantic_segmentation - post_process_instance_segmentation - post_process_panoptic_segmentation diff --git a/src/transformers/models/detr/modeling_detr.py b/src/transformers/models/detr/modeling_detr.py index 724c2b71a7bfd5..8fcbfe09c04ef3 100644 --- a/src/transformers/models/detr/modeling_detr.py +++ b/src/transformers/models/detr/modeling_detr.py @@ -1605,11 +1605,11 @@ def forward( >>> # Use the `post_process_panoptic_segmentation` method of `DetrFeatureExtractor` to retrieve post-processed panoptic segmentation maps >>> # Segmentation results are returned as a list of dictionaries - >>> result = feature_extractor.post_process_panoptic_segmentation(outputs, target_size=[(300, 500)]) + >>> result = feature_extractor.post_process_panoptic_segmentation(outputs, target_sizes=[(300, 500)]) - >>> # A tensor of shape (height, width) where each value denotes a segment id + >>> # A tensor of shape (height, width) where each value denotes a segment id, filled with -1 if no segment is found >>> panoptic_seg = result[0]["segmentation"] - >>> # Get mapping of segment ids to semantic class ids + >>> # Get prediction score and segment_id to class_id mapping of each segment >>> panoptic_segments_info = result[0]["segments_info"] ```""" From 7e348aac96dcc14cbe59496f0a1eb7df30a154d5 Mon Sep 17 00:00:00 2001 From: IMvision12 <88665786+IMvision12@users.noreply.github.com> Date: Fri, 7 Oct 2022 17:29:02 +0530 Subject: [PATCH 482/539] Making `ConvBert Tokenizer` independent from `bert Tokenizer` (#19347) * ConvBert * added comment * Updated * Final_updates * Update tokenization_convbert.py * Update tokenization_convbert_fast.py * Update tokenization_convbert.py * Update tokenization_convbert.py * Update tokenization_convbert_fast.py * Update tokenization_convbert.py * Update tokenization_convbert_fast.py * Updates * Updates * Updated * Final Updates --- .../models/convbert/tokenization_convbert.py | 469 +++++++++++++++++- .../convbert/tokenization_convbert_fast.py | 150 +++++- 2 files changed, 606 insertions(+), 13 deletions(-) diff --git a/src/transformers/models/convbert/tokenization_convbert.py b/src/transformers/models/convbert/tokenization_convbert.py index 8bf1b2826e0aed..bf3fb9994727d0 100644 --- a/src/transformers/models/convbert/tokenization_convbert.py +++ b/src/transformers/models/convbert/tokenization_convbert.py @@ -13,8 +13,13 @@ # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for ConvBERT.""" +import collections +import os +import unicodedata +from typing import List, Optional, Tuple + +from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...utils import logging -from ..bert.tokenization_bert import BertTokenizer logger = logging.get_logger(__name__) @@ -45,14 +50,466 @@ } -class ConvBertTokenizer(BertTokenizer): +# Copied from transformers.models.bert.tokenization_bert.load_vocab +def load_vocab(vocab_file): + """Loads a vocabulary file into a dictionary.""" + vocab = collections.OrderedDict() + with open(vocab_file, "r", encoding="utf-8") as reader: + tokens = reader.readlines() + for index, token in enumerate(tokens): + token = token.rstrip("\n") + vocab[token] = index + return vocab + + +# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize +def whitespace_tokenize(text): + """Runs basic whitespace cleaning and splitting on a piece of text.""" + text = text.strip() + if not text: + return [] + tokens = text.split() + return tokens + + +# Copied from transformers.models.bert.tokenization_bert.BertTokenizer with bert-base-cased->YituTech/conv-bert-base, ConvBertTokenizer->BertTokenizer, BERT->ConvBERT +class ConvBertTokenizer(PreTrainedTokenizer): r""" - Construct a ConvBERT tokenizer. [`ConvBertTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end - tokenization: punctuation splitting and wordpiece. Refer to superclass [`BertTokenizer`] for usage examples and - documentation concerning parameters. + Construct a ConvBERT tokenizer. Based on WordPiece. + + This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to + this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + File containing the vocabulary. + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether or not to lowercase the input when tokenizing. + do_basic_tokenize (`bool`, *optional*, defaults to `True`): + Whether or not to do basic tokenization before WordPiece. + never_split (`Iterable`, *optional*): + Collection of tokens which will never be split during tokenization. Only has an effect when + `do_basic_tokenize=True` + unk_token (`str`, *optional*, defaults to `"[UNK]"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + sep_token (`str`, *optional*, defaults to `"[SEP]"`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + pad_token (`str`, *optional*, defaults to `"[PAD]"`): + The token used for padding, for example when batching sequences of different lengths. + cls_token (`str`, *optional*, defaults to `"[CLS]"`): + The classifier token which is used when doing sequence classification (classification of the whole sequence + instead of per-token classification). It is the first token of the sequence when built with special tokens. + mask_token (`str`, *optional*, defaults to `"[MASK]"`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. + tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): + Whether or not to tokenize Chinese characters. + + This should likely be deactivated for Japanese (see this + [issue](https://github.com/huggingface/transformers/issues/328)). + strip_accents (`bool`, *optional*): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for `lowercase` (as in the original ConvBERT). """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP - max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + + def __init__( + self, + vocab_file, + do_lower_case=True, + do_basic_tokenize=True, + never_split=None, + unk_token="[UNK]", + sep_token="[SEP]", + pad_token="[PAD]", + cls_token="[CLS]", + mask_token="[MASK]", + tokenize_chinese_chars=True, + strip_accents=None, + **kwargs + ): + super().__init__( + do_lower_case=do_lower_case, + do_basic_tokenize=do_basic_tokenize, + never_split=never_split, + unk_token=unk_token, + sep_token=sep_token, + pad_token=pad_token, + cls_token=cls_token, + mask_token=mask_token, + tokenize_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + **kwargs, + ) + + if not os.path.isfile(vocab_file): + raise ValueError( + f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained" + " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" + ) + self.vocab = load_vocab(vocab_file) + self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) + self.do_basic_tokenize = do_basic_tokenize + if do_basic_tokenize: + self.basic_tokenizer = BasicTokenizer( + do_lower_case=do_lower_case, + never_split=never_split, + tokenize_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + ) + self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token) + + @property + def do_lower_case(self): + return self.basic_tokenizer.do_lower_case + + @property + def vocab_size(self): + return len(self.vocab) + + def get_vocab(self): + return dict(self.vocab, **self.added_tokens_encoder) + + def _tokenize(self, text): + split_tokens = [] + if self.do_basic_tokenize: + for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): + + # If the token is part of the never_split set + if token in self.basic_tokenizer.never_split: + split_tokens.append(token) + else: + split_tokens += self.wordpiece_tokenizer.tokenize(token) + else: + split_tokens = self.wordpiece_tokenizer.tokenize(text) + return split_tokens + + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.vocab.get(token, self.vocab.get(self.unk_token)) + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.ids_to_tokens.get(index, self.unk_token) + + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + out_string = " ".join(tokens).replace(" ##", "").strip() + return out_string + + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A ConvBERT sequence has the following format: + + - single sequence: `[CLS] X [SEP]` + - pair of sequences: `[CLS] A [SEP] B [SEP]` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. + """ + if token_ids_1 is None: + return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + cls = [self.cls_token_id] + sep = [self.sep_token_id] + return cls + token_ids_0 + sep + token_ids_1 + sep + + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + if token_ids_1 is not None: + return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] + return [1] + ([0] * len(token_ids_0)) + [1] + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. A ConvBERT + sequence pair mask has the following format: + + ``` + 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence | + ``` + + If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + index = 0 + if os.path.isdir(save_directory): + vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + else: + vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory + with open(vocab_file, "w", encoding="utf-8") as writer: + for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): + if index != token_index: + logger.warning( + f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." + " Please check that the vocabulary is not corrupted!" + ) + index = token_index + writer.write(token + "\n") + index += 1 + return (vocab_file,) + + +# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer +class BasicTokenizer(object): + """ + Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). + + Args: + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether or not to lowercase the input when tokenizing. + never_split (`Iterable`, *optional*): + Collection of tokens which will never be split during tokenization. Only has an effect when + `do_basic_tokenize=True` + tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): + Whether or not to tokenize Chinese characters. + + This should likely be deactivated for Japanese (see this + [issue](https://github.com/huggingface/transformers/issues/328)). + strip_accents (`bool`, *optional*): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for `lowercase` (as in the original BERT). + """ + + def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None): + if never_split is None: + never_split = [] + self.do_lower_case = do_lower_case + self.never_split = set(never_split) + self.tokenize_chinese_chars = tokenize_chinese_chars + self.strip_accents = strip_accents + + def tokenize(self, text, never_split=None): + """ + Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see + WordPieceTokenizer. + + Args: + never_split (`List[str]`, *optional*) + Kept for backward compatibility purposes. Now implemented directly at the base class level (see + [`PreTrainedTokenizer.tokenize`]) List of token not to split. + """ + # union() returns a new set by concatenating the two sets. + never_split = self.never_split.union(set(never_split)) if never_split else self.never_split + text = self._clean_text(text) + + # This was added on November 1st, 2018 for the multilingual and Chinese + # models. This is also applied to the English models now, but it doesn't + # matter since the English models were not trained on any Chinese data + # and generally don't have any Chinese data in them (there are Chinese + # characters in the vocabulary because Wikipedia does have some Chinese + # words in the English Wikipedia.). + if self.tokenize_chinese_chars: + text = self._tokenize_chinese_chars(text) + orig_tokens = whitespace_tokenize(text) + split_tokens = [] + for token in orig_tokens: + if token not in never_split: + if self.do_lower_case: + token = token.lower() + if self.strip_accents is not False: + token = self._run_strip_accents(token) + elif self.strip_accents: + token = self._run_strip_accents(token) + split_tokens.extend(self._run_split_on_punc(token, never_split)) + + output_tokens = whitespace_tokenize(" ".join(split_tokens)) + return output_tokens + + def _run_strip_accents(self, text): + """Strips accents from a piece of text.""" + text = unicodedata.normalize("NFD", text) + output = [] + for char in text: + cat = unicodedata.category(char) + if cat == "Mn": + continue + output.append(char) + return "".join(output) + + def _run_split_on_punc(self, text, never_split=None): + """Splits punctuation on a piece of text.""" + if never_split is not None and text in never_split: + return [text] + chars = list(text) + i = 0 + start_new_word = True + output = [] + while i < len(chars): + char = chars[i] + if _is_punctuation(char): + output.append([char]) + start_new_word = True + else: + if start_new_word: + output.append([]) + start_new_word = False + output[-1].append(char) + i += 1 + + return ["".join(x) for x in output] + + def _tokenize_chinese_chars(self, text): + """Adds whitespace around any CJK character.""" + output = [] + for char in text: + cp = ord(char) + if self._is_chinese_char(cp): + output.append(" ") + output.append(char) + output.append(" ") + else: + output.append(char) + return "".join(output) + + def _is_chinese_char(self, cp): + """Checks whether CP is the codepoint of a CJK character.""" + # This defines a "chinese character" as anything in the CJK Unicode block: + # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) + # + # Note that the CJK Unicode block is NOT all Japanese and Korean characters, + # despite its name. The modern Korean Hangul alphabet is a different block, + # as is Japanese Hiragana and Katakana. Those alphabets are used to write + # space-separated words, so they are not treated specially and handled + # like the all of the other languages. + if ( + (cp >= 0x4E00 and cp <= 0x9FFF) + or (cp >= 0x3400 and cp <= 0x4DBF) # + or (cp >= 0x20000 and cp <= 0x2A6DF) # + or (cp >= 0x2A700 and cp <= 0x2B73F) # + or (cp >= 0x2B740 and cp <= 0x2B81F) # + or (cp >= 0x2B820 and cp <= 0x2CEAF) # + or (cp >= 0xF900 and cp <= 0xFAFF) + or (cp >= 0x2F800 and cp <= 0x2FA1F) # + ): # + return True + + return False + + def _clean_text(self, text): + """Performs invalid character removal and whitespace cleanup on text.""" + output = [] + for char in text: + cp = ord(char) + if cp == 0 or cp == 0xFFFD or _is_control(char): + continue + if _is_whitespace(char): + output.append(" ") + else: + output.append(char) + return "".join(output) + + +# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer +class WordpieceTokenizer(object): + """Runs WordPiece tokenization.""" + + def __init__(self, vocab, unk_token, max_input_chars_per_word=100): + self.vocab = vocab + self.unk_token = unk_token + self.max_input_chars_per_word = max_input_chars_per_word + + def tokenize(self, text): + """ + Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform + tokenization using the given vocabulary. + + For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`. + + Args: + text: A single token or whitespace separated tokens. This should have + already been passed through *BasicTokenizer*. + + Returns: + A list of wordpiece tokens. + """ + + output_tokens = [] + for token in whitespace_tokenize(text): + chars = list(token) + if len(chars) > self.max_input_chars_per_word: + output_tokens.append(self.unk_token) + continue + + is_bad = False + start = 0 + sub_tokens = [] + while start < len(chars): + end = len(chars) + cur_substr = None + while start < end: + substr = "".join(chars[start:end]) + if start > 0: + substr = "##" + substr + if substr in self.vocab: + cur_substr = substr + break + end -= 1 + if cur_substr is None: + is_bad = True + break + sub_tokens.append(cur_substr) + start = end + + if is_bad: + output_tokens.append(self.unk_token) + else: + output_tokens.extend(sub_tokens) + return output_tokens diff --git a/src/transformers/models/convbert/tokenization_convbert_fast.py b/src/transformers/models/convbert/tokenization_convbert_fast.py index 383382e13082b8..65c37a9b0927e8 100644 --- a/src/transformers/models/convbert/tokenization_convbert_fast.py +++ b/src/transformers/models/convbert/tokenization_convbert_fast.py @@ -13,8 +13,13 @@ # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for ConvBERT.""" +import json +from typing import List, Optional, Tuple + +from tokenizers import normalizers + +from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging -from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_convbert import ConvBertTokenizer @@ -46,17 +51,148 @@ } -class ConvBertTokenizerFast(BertTokenizerFast): +# Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast with bert-base-cased->YituTech/conv-bert-base, Bert->ConvBert, BERT->ConvBERT +class ConvBertTokenizerFast(PreTrainedTokenizerFast): r""" - Construct a "fast" ConvBERT tokenizer (backed by HuggingFace's *tokenizers* library). + Construct a "fast" ConvBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece. - [`ConvBertTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization: punctuation - splitting and wordpiece. + This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should + refer to this superclass for more information regarding those methods. - Refer to superclass [`BertTokenizerFast`] for usage examples and documentation concerning parameters. + Args: + vocab_file (`str`): + File containing the vocabulary. + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether or not to lowercase the input when tokenizing. + unk_token (`str`, *optional*, defaults to `"[UNK]"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + sep_token (`str`, *optional*, defaults to `"[SEP]"`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + pad_token (`str`, *optional*, defaults to `"[PAD]"`): + The token used for padding, for example when batching sequences of different lengths. + cls_token (`str`, *optional*, defaults to `"[CLS]"`): + The classifier token which is used when doing sequence classification (classification of the whole sequence + instead of per-token classification). It is the first token of the sequence when built with special tokens. + mask_token (`str`, *optional*, defaults to `"[MASK]"`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. + clean_text (`bool`, *optional*, defaults to `True`): + Whether or not to clean the text before tokenization by removing any control characters and replacing all + whitespaces by the classic one. + tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): + Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this + issue](https://github.com/huggingface/transformers/issues/328)). + strip_accents (`bool`, *optional*): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for `lowercase` (as in the original ConvBERT). + wordpieces_prefix (`str`, *optional*, defaults to `"##"`): + The prefix for subwords. """ + vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP - max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES slow_tokenizer_class = ConvBertTokenizer + + def __init__( + self, + vocab_file=None, + tokenizer_file=None, + do_lower_case=True, + unk_token="[UNK]", + sep_token="[SEP]", + pad_token="[PAD]", + cls_token="[CLS]", + mask_token="[MASK]", + tokenize_chinese_chars=True, + strip_accents=None, + **kwargs + ): + super().__init__( + vocab_file, + tokenizer_file=tokenizer_file, + do_lower_case=do_lower_case, + unk_token=unk_token, + sep_token=sep_token, + pad_token=pad_token, + cls_token=cls_token, + mask_token=mask_token, + tokenize_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + **kwargs, + ) + + normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__()) + if ( + normalizer_state.get("lowercase", do_lower_case) != do_lower_case + or normalizer_state.get("strip_accents", strip_accents) != strip_accents + or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars + ): + normalizer_class = getattr(normalizers, normalizer_state.pop("type")) + normalizer_state["lowercase"] = do_lower_case + normalizer_state["strip_accents"] = strip_accents + normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars + self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state) + + self.do_lower_case = do_lower_case + + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A ConvBERT sequence has the following format: + + - single sequence: `[CLS] X [SEP]` + - pair of sequences: `[CLS] A [SEP] B [SEP]` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. + """ + output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + + if token_ids_1: + output += token_ids_1 + [self.sep_token_id] + + return output + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. A ConvBERT + sequence pair mask has the following format: + + ``` + 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence | + ``` + + If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + files = self._tokenizer.model.save(save_directory, name=filename_prefix) + return tuple(files) From 46fd04b481e23cdaffb42b7587ca95f2d794def9 Mon Sep 17 00:00:00 2001 From: Zachary Mueller Date: Fri, 7 Oct 2022 08:36:05 -0400 Subject: [PATCH 483/539] Fix gather for metrics (#19389) From 969534af4bf8049b674917c712dd9c1f9ae88242 Mon Sep 17 00:00:00 2001 From: IMvision12 <88665786+IMvision12@users.noreply.github.com> Date: Fri, 7 Oct 2022 18:14:50 +0530 Subject: [PATCH 484/539] Added Type hints for XLM TF (#19333) * Update modeling_tf_xlm.py * Updates * Update src/transformers/models/xlm/modeling_tf_xlm.py * Update src/transformers/models/xlm/modeling_tf_xlm.py * Update src/transformers/models/xlm/modeling_tf_xlm.py * Update src/transformers/models/xlm/modeling_tf_xlm.py * Update src/transformers/models/xlm/modeling_tf_xlm.py Co-authored-by: Matt --- .../models/xlm/modeling_tf_xlm.py | 143 +++++++++--------- 1 file changed, 72 insertions(+), 71 deletions(-) diff --git a/src/transformers/models/xlm/modeling_tf_xlm.py b/src/transformers/models/xlm/modeling_tf_xlm.py index 8bc0925c2fd829..3511e2f44963dd 100644 --- a/src/transformers/models/xlm/modeling_tf_xlm.py +++ b/src/transformers/models/xlm/modeling_tf_xlm.py @@ -19,7 +19,7 @@ import itertools import warnings from dataclasses import dataclass -from typing import Dict, Optional, Tuple +from typing import Dict, Optional, Tuple, Union import numpy as np import tensorflow as tf @@ -33,6 +33,7 @@ TFTokenClassifierOutput, ) from ...modeling_tf_utils import ( + TFModelInputType, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, @@ -844,19 +845,19 @@ def prepare_inputs_for_generation(self, inputs, **kwargs): ) def call( self, - input_ids=None, - attention_mask=None, - langs=None, - token_type_ids=None, - position_ids=None, - lengths=None, - cache=None, - head_mask=None, - inputs_embeds=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - training=False, + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + langs: Optional[Union[np.ndarray, tf.Tensor]] = None, + token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + lengths: Optional[Union[np.ndarray, tf.Tensor]] = None, + cache: Optional[Dict[str, tf.Tensor]] = None, + head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, ): transformer_outputs = self.transformer( input_ids=input_ids, @@ -916,20 +917,20 @@ def __init__(self, config, *inputs, **kwargs): ) def call( self, - input_ids=None, - attention_mask=None, - langs=None, - token_type_ids=None, - position_ids=None, - lengths=None, - cache=None, - head_mask=None, - inputs_embeds=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - labels=None, - training=False, + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + langs: Optional[Union[np.ndarray, tf.Tensor]] = None, + token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + lengths: Optional[Union[np.ndarray, tf.Tensor]] = None, + cache: Optional[Dict[str, tf.Tensor]] = None, + head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: Optional[Union[np.ndarray, tf.Tensor]] = None, + training: bool = False, ): r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): @@ -1023,20 +1024,20 @@ def dummy_inputs(self): ) def call( self, - input_ids=None, - attention_mask=None, - langs=None, - token_type_ids=None, - position_ids=None, - lengths=None, - cache=None, - head_mask=None, - inputs_embeds=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - labels=None, - training=False, + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + langs: Optional[Union[np.ndarray, tf.Tensor]] = None, + token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + lengths: Optional[Union[np.ndarray, tf.Tensor]] = None, + cache: Optional[Dict[str, tf.Tensor]] = None, + head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: Optional[Union[np.ndarray, tf.Tensor]] = None, + training: bool = False, ): if input_ids is not None: num_choices = shape_list(input_ids)[1] @@ -1147,20 +1148,20 @@ def __init__(self, config, *inputs, **kwargs): ) def call( self, - input_ids=None, - attention_mask=None, - langs=None, - token_type_ids=None, - position_ids=None, - lengths=None, - cache=None, - head_mask=None, - inputs_embeds=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - labels=None, - training=False, + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + langs: Optional[Union[np.ndarray, tf.Tensor]] = None, + token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + lengths: Optional[Union[np.ndarray, tf.Tensor]] = None, + cache: Optional[Dict[str, tf.Tensor]] = None, + head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: Optional[Union[np.ndarray, tf.Tensor]] = None, + training: bool = False, ): r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): @@ -1232,21 +1233,21 @@ def __init__(self, config, *inputs, **kwargs): ) def call( self, - input_ids=None, - attention_mask=None, - langs=None, - token_type_ids=None, - position_ids=None, - lengths=None, - cache=None, - head_mask=None, - inputs_embeds=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - start_positions=None, - end_positions=None, - training=False, + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + langs: Optional[Union[np.ndarray, tf.Tensor]] = None, + token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + lengths: Optional[Union[np.ndarray, tf.Tensor]] = None, + cache: Optional[Dict[str, tf.Tensor]] = None, + head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + start_positions: Optional[Union[np.ndarray, tf.Tensor]] = None, + end_positions: Optional[Union[np.ndarray, tf.Tensor]] = None, + training: bool = False, ): r""" start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): From e162cebfa3e9ca707d0d226a55b17cbe2c7ba719 Mon Sep 17 00:00:00 2001 From: Bibhabasu Mohapatra <68384968+bibhabasumohapatra@users.noreply.github.com> Date: Fri, 7 Oct 2022 18:53:24 +0530 Subject: [PATCH 485/539] add ONNX support for swin transformer (#19390) * swin transformer onnx support * Updated image dimensions as dynamic Co-authored-by: lewtun Co-authored-by: lewtun --- docs/source/en/serialization.mdx | 1 + src/transformers/models/swin/__init__.py | 4 ++-- .../models/swin/configuration_swin.py | 23 +++++++++++++++++++ src/transformers/onnx/features.py | 3 +++ tests/onnx/test_onnx_v2.py | 1 + 5 files changed, 30 insertions(+), 2 deletions(-) diff --git a/docs/source/en/serialization.mdx b/docs/source/en/serialization.mdx index 903d35da4c4cd3..c6bd29bc63d8ac 100644 --- a/docs/source/en/serialization.mdx +++ b/docs/source/en/serialization.mdx @@ -94,6 +94,7 @@ Ready-made configurations include the following architectures: - RoFormer - SegFormer - SqueezeBERT +- Swin Transformer - T5 - ViT - XLM diff --git a/src/transformers/models/swin/__init__.py b/src/transformers/models/swin/__init__.py index 33a9bddeea7332..63809f369bc51f 100644 --- a/src/transformers/models/swin/__init__.py +++ b/src/transformers/models/swin/__init__.py @@ -21,7 +21,7 @@ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available -_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig"]} +_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]} try: @@ -53,7 +53,7 @@ ] if TYPE_CHECKING: - from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig + from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig try: if not is_torch_available(): diff --git a/src/transformers/models/swin/configuration_swin.py b/src/transformers/models/swin/configuration_swin.py index 878a73e9208b5e..b1d0ceb9bb889b 100644 --- a/src/transformers/models/swin/configuration_swin.py +++ b/src/transformers/models/swin/configuration_swin.py @@ -14,7 +14,13 @@ # limitations under the License. """ Swin Transformer model configuration""" +from collections import OrderedDict +from typing import Mapping + +from packaging import version + from ...configuration_utils import PretrainedConfig +from ...onnx import OnnxConfig from ...utils import logging @@ -145,3 +151,20 @@ def __init__( # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1)) + + +class SwinOnnxConfig(OnnxConfig): + + torch_onnx_minimum_version = version.parse("1.11") + + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + return OrderedDict( + [ + ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), + ] + ) + + @property + def atol_for_validation(self) -> float: + return 1e-4 diff --git a/src/transformers/onnx/features.py b/src/transformers/onnx/features.py index 535686f179a53c..4d1af87465fbcf 100644 --- a/src/transformers/onnx/features.py +++ b/src/transformers/onnx/features.py @@ -471,6 +471,9 @@ class FeaturesManager: "question-answering", onnx_config_cls="models.squeezebert.SqueezeBertOnnxConfig", ), + "swin": supported_features_mapping( + "default", "image-classification", "masked-im", onnx_config_cls="models.swin.SwinOnnxConfig" + ), "t5": supported_features_mapping( "default", "default-with-past", diff --git a/tests/onnx/test_onnx_v2.py b/tests/onnx/test_onnx_v2.py index f3c19ed8fa9872..dac4a25803c04f 100644 --- a/tests/onnx/test_onnx_v2.py +++ b/tests/onnx/test_onnx_v2.py @@ -217,6 +217,7 @@ def test_values_override(self): ("longformer", "allenai/longformer-base-4096"), ("yolos", "hustvl/yolos-tiny"), ("segformer", "nvidia/segformer-b0-finetuned-ade-512-512"), + ("swin", "microsoft/swin-tiny-patch4-window7-224"), } PYTORCH_EXPORT_WITH_PAST_MODELS = { From b29ebdf4d80ded12fded8d5e0ed8e510c157f1c9 Mon Sep 17 00:00:00 2001 From: h Date: Fri, 7 Oct 2022 06:26:23 -0700 Subject: [PATCH 486/539] removes prophet config dependencies from xlm-prophet (#19400) --- .../configuration_xlm_prophetnet.py | 158 +++++++++++++++++- 1 file changed, 151 insertions(+), 7 deletions(-) diff --git a/src/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py b/src/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py index 3025ed29f64328..cdca20ef3b43a8 100644 --- a/src/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py +++ b/src/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py @@ -15,8 +15,10 @@ """ XLM-ProphetNet model configuration""" +from typing import Callable, Optional, Union + +from ...configuration_utils import PretrainedConfig from ...utils import logging -from ..prophetnet.configuration_prophetnet import ProphetNetConfig logger = logging.get_logger(__name__) @@ -28,13 +30,155 @@ } -class XLMProphetNetConfig(ProphetNetConfig): - """ - This class overrides [`ProphetNetConfig`]. Please check the superclass for the appropriate documentation alongside - usage examples. Instantiating a configuration with the defaults will yield a similar configuration to that of the - XLMProphetNet +class XLMProphetNetConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`XLMProphetNetModel`]. It is used to instantiate a + XLMProphetNet model according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the XLMProphetNet [microsoft/xprophetnet-large-wiki100-cased](https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased) architecture. - """ + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + activation_dropout (`float`, *optional*, defaults to 0.1): + The dropout ratio for activations inside the fully connected layer. + activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + vocab_size (`int`, *optional*, defaults to 30522): + Vocabulary size of the ProphetNET model. Defines the number of different tokens that can be represented by + the `inputs_ids` passed when calling [`XLMProphetNetModel`]. + hidden_size (`int`, *optional*, defaults to 1024): + Dimensionality of the layers and the pooler layer. + encoder_ffn_dim (`int`, *optional*, defaults to 4096): + Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. + num_encoder_layers (`int`, *optional*, defaults to 12): + Number of encoder layers. + num_encoder_attention_heads (`int`, *optional*, defaults to 16): + Number of attention heads for each attention layer in the Transformer encoder. + decoder_ffn_dim (`int`, *optional*, defaults to 4096): + Dimensionality of the `intermediate` (often named feed-forward) layer in decoder. + num_decoder_layers (`int`, *optional*, defaults to 12): + Number of decoder layers. + num_decoder_attention_heads (`int`, *optional*, defaults to 16): + Number of attention heads for each attention layer in the Transformer decoder. + attention_dropout (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention probabilities. + dropout (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + max_position_embeddings (`int`, *optional*, defaults to 512): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + init_std (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + add_cross_attention (`bool`, *optional*, defaults to `True`): + Whether cross-attention layers should be added to the model. + is_encoder_decoder (`bool`, *optional*, defaults to `True`): + Whether this is an encoder/decoder model. + pad_token_id (`int`, *optional*, defaults to 1) + Padding token id. + bos_token_id (`int`, *optional*, defaults to 0) + Beginning of stream token id. + eos_token_id (`int`, *optional*, defaults to 2) + End of stream token id. + ngram (`int`, *optional*, defaults to 2) + Number of future tokens to predict. Set to 1 to be same as traditional Language model to predict next first + token. + num_buckets (`int`, *optional*, defaults to 32) + The number of buckets to use for each attention layer. This is for relative position calculation. See the + [T5 paper](see https://arxiv.org/abs/1910.10683) for more details. + relative_max_distance (`int`, *optional*, defaults to 128) + Relative distances greater than this number will be put into the last same bucket. This is for relative + position calculation. See the [T5 paper](see https://arxiv.org/abs/1910.10683) for more details. + disable_ngram_loss (`bool`, *optional*, defaults to `False`): + Whether be trained predicting only the next first token. + eps (`float`, *optional*, defaults to 0.0): + Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label + smoothing is performed. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). + """ model_type = "xlm-prophetnet" + keys_to_ignore_at_inference = ["past_key_values"] + attribute_map = { + "num_attention_heads": "num_encoder_attention_heads", + } + + def __init__( + self, + activation_dropout: Optional[float] = 0.1, + activation_function: Optional[Union[str, Callable]] = "gelu", + vocab_size: Optional[int] = 30522, + hidden_size: Optional[int] = 1024, + encoder_ffn_dim: Optional[int] = 4096, + num_encoder_layers: Optional[int] = 12, + num_encoder_attention_heads: Optional[int] = 16, + decoder_ffn_dim: Optional[int] = 4096, + num_decoder_layers: Optional[int] = 12, + num_decoder_attention_heads: Optional[int] = 16, + attention_dropout: Optional[float] = 0.1, + dropout: Optional[float] = 0.1, + max_position_embeddings: Optional[int] = 512, + init_std: Optional[float] = 0.02, + is_encoder_decoder: Optional[bool] = True, + add_cross_attention: Optional[bool] = True, + decoder_start_token_id: Optional[int] = 0, + ngram: Optional[int] = 2, + num_buckets: Optional[int] = 32, + relative_max_distance: Optional[int] = 128, + disable_ngram_loss: Optional[bool] = False, + eps: Optional[float] = 0.0, + use_cache: Optional[bool] = True, + pad_token_id: Optional[int] = 0, + bos_token_id: Optional[int] = 1, + eos_token_id: Optional[int] = 2, + **kwargs + ): + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.encoder_ffn_dim = encoder_ffn_dim + self.num_encoder_layers = num_encoder_layers + self.num_encoder_attention_heads = num_encoder_attention_heads + self.decoder_ffn_dim = decoder_ffn_dim + self.num_decoder_layers = num_decoder_layers + self.num_decoder_attention_heads = num_decoder_attention_heads + self.max_position_embeddings = max_position_embeddings + self.init_std = init_std # Normal(0, this parameter) + self.activation_function = activation_function + + # parameters for xlmprophetnet + self.ngram = ngram + self.num_buckets = num_buckets + self.relative_max_distance = relative_max_distance + self.disable_ngram_loss = disable_ngram_loss + self.eps = eps + + # 3 Types of Dropout + self.attention_dropout = attention_dropout + self.activation_dropout = activation_dropout + self.dropout = dropout + + self.use_cache = use_cache + + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + is_encoder_decoder=is_encoder_decoder, + add_cross_attention=add_cross_attention, + decoder_start_token_id=decoder_start_token_id, + **kwargs, + ) + + @property + def num_hidden_layers(self) -> int: + return self.num_encoder_layers + self.num_decoder_layers + + @num_hidden_layers.setter + def num_hidden_layers(self, value): + raise NotImplementedError( + "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and" + " `num_decoder_layers`." + ) From 41ec5d0ced34558163f8fea20ec1518a4351f24e Mon Sep 17 00:00:00 2001 From: Thomas Date: Fri, 7 Oct 2022 21:44:58 +0800 Subject: [PATCH 487/539] Added type hints for TF: TransfoXL (#19380) * Added type hints for TF: TransfoXL * Added type hints for TF: TransfoXL * Change type hints for training * Change type hints for training --- .../transfo_xl/modeling_tf_transfo_xl.py | 51 ++++++++++--------- 1 file changed, 26 insertions(+), 25 deletions(-) diff --git a/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py b/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py index 53370c1d3e7cfc..e1e23773a27f54 100644 --- a/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py +++ b/src/transformers/models/transfo_xl/modeling_tf_transfo_xl.py @@ -542,14 +542,15 @@ def _update_mems(self, hids, mems, mlen, qlen): @unpack_inputs def call( self, - input_ids=None, - mems=None, - head_mask=None, - inputs_embeds=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - training=False, + input_ids: Optional[TFModelInputType] = None, + mems: Optional[List[tf.Tensor]] = None, + head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: Optional[Union[np.ndarray, tf.Tensor]] = None, + training: bool = False, ): # the original code for Transformer-XL used shapes [len, bsz] but we want a unified interface in the library @@ -894,14 +895,14 @@ def __init__(self, config, *inputs, **kwargs): ) def call( self, - input_ids=None, - mems=None, - head_mask=None, - inputs_embeds=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - training=False, + input_ids: Optional[TFModelInputType] = None, + mems: Optional[List[tf.Tensor]] = None, + head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, ): outputs = self.transformer( input_ids=input_ids, @@ -974,15 +975,15 @@ def init_mems(self, bsz): ) def call( self, - input_ids=None, - mems=None, - head_mask=None, - inputs_embeds=None, - output_attentions=None, - output_hidden_states=None, - return_dict=None, - labels=None, - training=False, + input_ids: Optional[TFModelInputType] = None, + mems: Optional[List[tf.Tensor]] = None, + head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: Optional[Union[np.ndarray, tf.Tensor]] = None, + training: bool = False, ): if input_ids is not None: bsz, tgt_len = shape_list(input_ids)[:2] From 56af8df35966780f413c0b407ef65af087a497cd Mon Sep 17 00:00:00 2001 From: Sourab Mangrulkar <13534540+pacman100@users.noreply.github.com> Date: Fri, 7 Oct 2022 19:16:55 +0530 Subject: [PATCH 488/539] HF <-> megatron checkpoint reshaping and conversion for GPT (#19317) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * HF <-> megatron checkpoint conversion handling reshaping from different tensor and parallel sizes * Apply suggestions from code review Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * addressing comments * add doc strings and 🐛 fixes Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> --- ...eckpoint_reshaping_and_interoperability.py | 900 ++++++++++++++++++ 1 file changed, 900 insertions(+) create mode 100644 src/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py diff --git a/src/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py b/src/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py new file mode 100644 index 00000000000000..0f99b19e6e8546 --- /dev/null +++ b/src/transformers/models/megatron_gpt2/checkpoint_reshaping_and_interoperability.py @@ -0,0 +1,900 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import json +import os +import re +import sys +import types + +import torch + +from transformers import AutoTokenizer, GPT2Config +from transformers.modeling_utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME, shard_checkpoint + + +def add_checkpointing_args(parser): + parser.add_argument("--megatron-path", type=str, default=None, help="Base directory of Megatron repository") + parser.add_argument( + "--convert_checkpoint_from_megatron_to_transformers", + action="store_true", + help=( + "If True, convert a Megatron checkpoint to a Transformers checkpoint. " + "If False, convert a Transformers checkpoint to a Megatron checkpoint." + ), + ) + parser.add_argument( + "--load_path", + type=str, + required=True, + help="Path to the checkpoint to convert.", + ) + parser.add_argument( + "--save_path", + type=str, + required=True, + help="Path to the converted checkpoint.", + ) + parser.add_argument("--print-checkpoint-structure", action="store_true") + return parser + + +def add_megatron_checkpoint_args(parser): + parser.add_argument( + "--target_tensor_model_parallel_size", + type=int, + default=1, + help=( + "The tensor model parallel size of the converted checkpoint. " + "Only used when converting a Transformers checkpoint to a Megatron checkpoint." + ), + ) + parser.add_argument( + "--target_pipeline_model_parallel_size", + type=int, + default=1, + help=( + "The pipeline model parallel size of the converted checkpoint. " + "Only used when converting a Transformers checkpoint to a Megatron checkpoint." + ), + ) + parser.add_argument( + "--target_data_parallel_size", + type=int, + default=1, + help=( + "The data parallel size of the converted checkpoint. " + "Only used when converting a Transformers checkpoint to a Megatron checkpoint." + ), + ) + parser.add_argument( + "--target_params_dtype", + type=str, + default="fp32", + help=( + "The dtype of the converted checkpoint. " + "Only used when converting a Transformers checkpoint to a Megatron checkpoint." + ), + ) + parser.add_argument( + "--make_vocab_size_divisible_by", + type=int, + default=128, + help=( + "Pad the vocab size to be divisible by this value. " + "This is added for computational efficieny reasons. " + "Only used when converting a Transformers checkpoint to a Megatron checkpoint." + ), + ) + parser.add_argument( + "--use_distributed_optimizer", + action="store_true", + help=( + "If True, use the distributed optimizer. " + "Only used when converting a Transformers checkpoint to a Megatron checkpoint." + ), + ) + return parser + + +def add_transformers_checkpoint_args(parser): + parser.add_argument( + "--tokenizer_name", + type=str, + default=None, + help=( + "The name of the pre-trained tokenizer to save. " + "If not None, the tokenizer will be saved. " + "Only used when converting a Megatron checkpoint to a Transformers checkpoint." + ), + ) + parser.add_argument( + "--max_shard_size", + type=str, + default="10GB", + help=( + "The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size " + "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`). " + "Only used when converting a Megatron checkpoint to a Transformers checkpoint." + ), + ) + + return parser + + +# The simple map of names for "automated" rules. +megatron_to_transformers = { + "attention.dense": ".attn.c_proj.", + "self_attention.dense": ".attn.c_proj.", + "mlp.dense_h_to_4h": ".mlp.c_fc.", + "mlp.dense_4h_to_h": ".mlp.c_proj.", +} +transformers_to_megatron = {v[1:-1]: k for k, v in megatron_to_transformers.items()} + +tensor_parallel_params = [ + # megatron-lm layers to merge across tp ranks + "self_attention.query_key_value.weight", + "self_attention.query_key_value.bias", + "self_attention.dense.weight", + "mlp.dense_h_to_4h.weight", + "mlp.dense_h_to_4h.bias", + "mlp.dense_4h_to_h.weight", + # deprecated + "attention.query_key_value.weight", + "attention.query_key_value.bias", + "attention.dense.weight", + # transformers layers to split across tp ranks + "attn.c_attn.weight", + "attn.c_attn.bias", + "attn.c_proj.weight", + "mlp.c_fc.weight", + "mlp.c_fc.bias", + "mlp.c_proj.weight", +] + + +def recursive_print(name, val, spaces=0): + """ + Recursively print the structure of a checkpoint. This function is taken from `convert_megatron_gpt2_checkpoint.py` + + Args: + name (str): the name of the current tensor parameter + val (Tuple(int)): the shape of the current tensor parameter + spaces (int): the number of spaces to print before the output for a nested structure + """ + # Format the message. + if name is None: + msg = None + else: + fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}" + msg = fmt.format(name) + + # Print and recurse (if needed). + if isinstance(val, dict): + if msg is not None: + print(msg) + for k in val.keys(): + recursive_print(k, val[k], spaces + 2) + elif isinstance(val, torch.Tensor): + print(msg, ":", val.size()) + else: + print(msg, ":", val) + + +def megatron_to_transformers_fix_query_key_value_ordering( + param, checkpoint_version, num_splits, num_heads, hidden_size +): + """ + Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :] for compatibility with later versions + of NVIDIA Megatron-LM. The inverse operation is performed inside Megatron-LM to read checkpoints: + https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209 If param is the weight tensor of the + self-attention block, the returned tensor will have to be transposed one more time to be read by HuggingFace GPT2. + This function is taken from `convert_megatron_gpt2_checkpoint.py` + + Args: + param (torch.Tensor): the tensor to permute + checkpoint_version (int): the version of the checkpoint. + num_splits (int): the number of projections, usually 3 for (Query, Key, Value) + num_heads (int): the number of attention heads + hidden_size (int): the hidden size per head + """ + + input_shape = param.size() + if checkpoint_version == 1.0: + # version 1.0 stores [num_heads * hidden_size * num_splits, :] + saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:] + param = param.view(*saved_shape) + param = param.transpose(0, 2) + param = param.transpose(1, 2).contiguous() + elif checkpoint_version >= 2.0: + # other versions store [num_heads * num_splits * hidden_size, :] + saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:] + param = param.view(*saved_shape) + param = param.transpose(0, 1).contiguous() + param = param.view(*input_shape) + return param + + +def transformers_to_megatron_fix_query_key_value_ordering( + param, checkpoint_version, num_splits, num_heads, hidden_size +): + """ + Permutes layout of param tensor to the one compatible with respective NVIDIA Megatron-LM chekpoint versions. Input + is [num_splits * num_heads * hidden_size, :] and output is [num_heads * hidden_size * num_splits, :] for version + 1.0 and [num_heads * num_splits * hidden_size, :] for version 2.0 and later. If param is the weight tensor of the + self-attention block, the param needs to be already transposed before calling this function. + + Args: + param (torch.Tensor): the tensor to permute + checkpoint_version (int): the version of the checkpoint. + num_splits (int): the number of projections, usually 3 for (Query, Key, Value) + num_heads (int): the number of attention heads + hidden_size (int): the hidden size per head + """ + + # Input is [num_splits * num_heads * hidden_size, :] + input_shape = param.size() + if checkpoint_version == 1.0: + # version 1.0 stores [num_heads * hidden_size * num_splits, :] + current_shape = (num_splits, num_heads, hidden_size) + input_shape[1:] + param = param.view(*current_shape) + param = param.transpose(0, 2) + param = param.transpose(1, 2).contiguous() + elif checkpoint_version >= 2.0: + # other versions store [num_heads * num_splits * hidden_size, :] + current_shape = (num_splits, num_heads, hidden_size) + input_shape[1:] + param = param.view(*current_shape) + param = param.transpose(0, 1).contiguous() + param = param.view(*input_shape) + return param + + +def merge_transformers_sharded_states(path, num_checkpoints): + """ + Merge sharded checkpoints from transformers into a single checkpoint. + + Args: + path (str): the path to the sharded checkpoints + num_checkpoints (int): the number of checkpoints to merge + """ + state_dict = {} + for i in range(1, num_checkpoints + 1): + checkpoint_path = os.path.join(path, f"pytorch_model-{i:05d}-of-{num_checkpoints:05d}.bin") + current_chunk = torch.load(checkpoint_path, map_location="cpu") + state_dict.update(current_chunk) + return state_dict + + +def get_megatron_sharded_states(args, tp_size, pp_size, pp_rank): + """ + Get sharded checkpoints from NVIDIA Megatron-LM checkpoint based on the provided tensor parallel size, pipeline + parallel size and pipeline parallel rank. + + Args: + args (argparse.Namespace): the arguments to the script + tp_size (int): the tensor parallel size + pp_size (int): the pipeline parallel size + pp_rank (int): the pipeline parallel rank + """ + tp_state_dicts = [] + for i in range(tp_size): + sub_dir_name = f"mp_rank_{i:02d}" if pp_size == 1 else f"mp_rank_{i:02d}_{pp_rank:03d}" + checkpoint_name = os.listdir(os.path.join(args.load_path, sub_dir_name))[0] + checkpoint_path = os.path.join(args.load_path, sub_dir_name, checkpoint_name) + state_dict = torch.load(checkpoint_path, map_location="cpu") + tp_state_dicts.append(state_dict) + return tp_state_dicts + + +def get_element_from_dict_by_path(d, path): + """ + Get element from dictionary by path. If element is not present, recursively add empty dictionaries. + + Args: + d (dict): the dictionary to get the element from + path (list): the path to the element which is delimited by "." + """ + path = path.split(".") + for k in path: + if k not in d: + d[k] = {} + d = d[k] + return d + + +def convert_checkpoint_from_megatron_to_transformers(args): + """ + Convert NVIDIA Megatron-LM checkpoint to HuggingFace Transformers checkpoint. This handles Megatron checkpoints + with different tensor parallelism and pipeline parallelism sizes. It saves the converted checkpoint into shards + using HuggingFace Transformers checkpoint sharding functionality. This greatly extends the functionality of + `convert_megatron_gpt2_checkpoint.py` + + Args: + args (argparse.Namespace): the arguments to the script + """ + # Load Megatron-LM checkpoint arguments from the state dict + sub_dirs = os.listdir(args.load_path) + possible_sub_dirs = ["mp_rank_00", "mp_rank_00_000"] + for sub_dir in possible_sub_dirs: + if sub_dir in sub_dirs: + rank0_checkpoint_name = os.listdir(os.path.join(args.load_path, sub_dir))[0] + rank0_checkpoint_path = os.path.join(args.load_path, sub_dir, rank0_checkpoint_name) + break + print(f"Loading Megatron-LM checkpoint arguments from: {rank0_checkpoint_path}") + state_dict = torch.load(rank0_checkpoint_path, map_location="cpu") + megatron_args = state_dict.get("args", None) + if megatron_args is None: + raise ValueError( + "Megatron-LM checkpoint does not contain arguments. This utility only supports Megatron-LM checkpoints" + " containing all the megatron arguments. This is because it loads all config related to model" + " architecture, the tensor and pipeline model parallel size from the checkpoint insead of user having to" + " manually specify all the details. Please save Megatron-LM checkpoint along with all the megatron" + " arguments to use this utility." + ) + + # Create Transformers GPT2 config from Megatron-LM arguments + if megatron_args is not None: + if megatron_args.bias_gelu_fusion: + activation_function = "gelu_fast" + elif megatron_args.openai_gelu: + activation_function = "gelu_new" + else: + activation_function = "gelu" + else: + # in the very early days this used to be "gelu_new" + activation_function = "gelu_new" + vocab_size = ( + megatron_args.padded_vocab_size + if getattr(megatron_args, "orig_vocab_size", None) is None + else megatron_args.orig_vocab_size + ) + print(vocab_size) + + config = GPT2Config( + vocab_size=vocab_size, + n_positions=megatron_args.max_position_embeddings, + n_embd=megatron_args.hidden_size, + n_layer=megatron_args.num_layers, + n_head=megatron_args.num_attention_heads, + n_inner=megatron_args.ffn_hidden_size, + activation_function=activation_function, + resid_pdrop=0.1, + embd_pdrop=0.1, + attn_pdrop=0.1, + layer_norm_epsilon=1e-5, + initializer_range=0.02, + summary_type="cls_index", + summary_use_proj=True, + summary_activation=None, + summary_proj_to_labels=True, + summary_first_dropout=0.1, + scale_attn_weights=True, + use_cache=True, + bos_token_id=vocab_size - 1, + eos_token_id=vocab_size - 1, + architectures=["GPT2LMHeadModel"], + ) + + output_state_dict = {} + + checkpoint_version = state_dict.get("checkpoint_version", 0.0) + tp_size = megatron_args.tensor_model_parallel_size + pp_size = megatron_args.pipeline_model_parallel_size + dtype = torch.float32 + # The regex to extract layer names. + layer_re = re.compile("layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)") + + # Convert. + print("Converting") + + # Embeddings + print("Converting embeddings") + tp_state_dicts = get_megatron_sharded_states(args, tp_size, pp_size, 0) + + # Convert and store the position embeddings. + position_embeddings = get_element_from_dict_by_path( + tp_state_dicts[0], "model.language_model.embedding.position_embeddings.weight" + ) + output_state_dict["transformer.wpe.weight"] = position_embeddings.to(dtype) + + # Convert and store the word embeddings. + word_embeddings = torch.cat( + [ + get_element_from_dict_by_path( + tp_state_dicts[tp_rank], "model.language_model.embedding.word_embeddings.weight" + ) + for tp_rank in range(tp_size) + ], + dim=0, + ) + word_embeddings = word_embeddings[:vocab_size].to(dtype) + output_state_dict["transformer.wte.weight"] = word_embeddings + + # Transformer Layers + print("Converting transformer layers") + # The number of heads. + heads = config.n_head + # The hidden_size per head. + hidden_size_per_head = config.n_embd // config.n_head + n_positions = config.n_positions + num_layers = config.num_hidden_layers // pp_size + + for pp_rank in range(pp_size): + if pp_size > 0: + print(f"Converting pipeline parallel rank {pp_rank}") + tp_state_dicts = get_megatron_sharded_states(args, tp_size, pp_size, pp_rank) + + # The transformer. + path = ( + "model.language_model.transformer" + if "transformer" in get_element_from_dict_by_path(tp_state_dicts[0], "model.language_model").keys() + else "model.language_model.encoder" + ) + # Extract the layers. + for key, val in get_element_from_dict_by_path(tp_state_dicts[0], path).items(): + # Match the name. + m = layer_re.match(key) + # Stop if that's not a layer + if m is None: + break + + # The index of the layer. + layer_idx = int(m.group(1)) + pp_rank * num_layers + # The name of the operation. + op_name = m.group(2) + # Is it a weight or a bias? + weight_or_bias = m.group(3) + + # The name of the layer. + layer_name = f"transformer.h.{layer_idx}" + + if op_name + "." + weight_or_bias not in tensor_parallel_params: + params = val.to(dtype) + else: + dim = 1 if op_name in ["self_attention.dense", "mlp.dense_4h_to_h", "attention.dense"] else 0 + params = torch.cat( + [val] + + [ + get_element_from_dict_by_path(tp_state_dicts[tp_rank], f"{path}")[key] + for tp_rank in range(1, tp_size) + ], + dim=dim, + ).to(dtype) + + # For layernorm(s), simply store the layer norm. + if op_name.endswith("layernorm"): + + ln_name = "ln_1" if op_name.startswith("input") else "ln_2" + output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = params + + # Transpose the QKV matrix. + elif ( + op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" + ) and weight_or_bias == "weight": + + # Insert a tensor of 1x1xDxD bias. + causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=dtype)).view( + 1, 1, n_positions, n_positions + ) + output_state_dict[layer_name + ".attn.bias"] = causal_mask + + # Insert a "dummy" tensor for masked_bias. + masked_bias = torch.tensor(-1e4, dtype=dtype) + output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias + + out_val = megatron_to_transformers_fix_query_key_value_ordering( + params, + checkpoint_version, + 3, + heads, + hidden_size_per_head, + ) + # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. + out_val = out_val.transpose(0, 1).contiguous() + # Store. + output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val + + # Transpose the bias. + elif ( + op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" + ) and weight_or_bias == "bias": + + out_val = megatron_to_transformers_fix_query_key_value_ordering( + params, checkpoint_version, 3, heads, hidden_size_per_head + ) + # Store. No change of shape. + output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val + + # Transpose the weights. + elif weight_or_bias == "weight": + + out_name = megatron_to_transformers[op_name] + output_state_dict[layer_name + out_name + "weight"] = params.transpose(0, 1) + + # Copy the bias. + elif weight_or_bias == "bias": + + out_name = megatron_to_transformers[op_name] + output_state_dict[layer_name + out_name + "bias"] = params + + if config.n_layer != (layer_idx + 1): + raise ValueError(f"Expected {config.n_layer} layers but found {layer_idx + 1}") + + # The final layernorm. + print("Converting final layernorm") + params = get_element_from_dict_by_path(tp_state_dicts[0], str(path)) + output_state_dict["transformer.ln_f.weight"] = params["final_layernorm.weight"].to(dtype) + output_state_dict["transformer.ln_f.bias"] = params["final_layernorm.bias"].to(dtype) + + # For LM head, transformers' wants the matrix to weight embeddings. + print("Converting LM head") + output_state_dict["lm_head.weight"] = word_embeddings.to(dtype) + + # It should be done! + print("Conversion from Megatron-LM to Transformers is done!") + + # Print the structure of converted state dict. + if args.print_checkpoint_structure: + recursive_print(None, output_state_dict) + + # Add tokenizer class info to config + # see https://github.com/huggingface/transformers/issues/13906) + + if args.tokenizer_name is None: + tokenizer_name = "gpt2" + else: + tokenizer_name = args.tokenizer_name + + tokenizer = AutoTokenizer.from_pretrained(tokenizer_name) + tokenizer_class = type(tokenizer).__name__ + config.tokenizer_class = tokenizer_class + + # Store the config to file. + print("Saving config") + config.save_pretrained(args.save_path) + + # Save tokenizer based on args + if args.tokenizer_name is not None: + print(f"Adding {tokenizer_class} tokenizer files") + tokenizer.save_pretrained(args.save_path) + + # Store the state_dict to file. + max_shard_size = int(args.max_shard_size) if args.max_shard_size.isdigit() else args.max_shard_size + shards, index = shard_checkpoint(output_state_dict, max_shard_size=max_shard_size) + + # Save the model + for shard_file, shard in shards.items(): + torch.save(shard, os.path.join(args.save_path, shard_file)) + + if index is None: + print(f"Model weights saved in {os.path.join(args.save_path, WEIGHTS_NAME)}") + else: + save_index_file = os.path.join(args.save_path, WEIGHTS_INDEX_NAME) + # Save the index as well + with open(save_index_file, "w", encoding="utf-8") as f: + content = json.dumps(index, indent=2, sort_keys=True) + "\n" + f.write(content) + print( + f"The model is bigger than the maximum size per checkpoint ({args.max_shard_size}) and is going to be " + f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the " + f"index located at {save_index_file}." + ) + + +def convert_checkpoint_from_transformers_to_megatron(args): + """ + Convert a checkpoint from HuggingFace Transformers to Megatron-LM. This allows converted checkpoints with variable + tensor parallelism and pipeline parallelism sizes. It takes as input a checkpoint from HuggingFace Transformers + which can have multiple shards. + + Args: + args (argparse.Namespace): the arguments to the script + + """ + os.makedirs(args.save_path, exist_ok=True) + # Search in directory above this + sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))) + if args.megatron_path is not None: + sys.path.insert(0, args.megatron_path) + + try: + from megatron.tokenizer.tokenizer import _vocab_size_with_padding + except ModuleNotFoundError: + print("Unable to import Megatron, please specify the path to Megatron using --megatron-path. Exiting.") + exit(1) + + # load the transformers model state dict and config + sub_dirs = [x for x in os.listdir(args.load_path) if x.startswith("pytorch_model")] + if len(sub_dirs) == 1: + checkpoint_name = "pytorch_model.bin" + state_dict = torch.load(os.path.join(args.load_path, checkpoint_name), map_location="cpu") + else: + num_checkpoints = len(sub_dirs) - 1 + state_dict = merge_transformers_sharded_states(args.load_path, num_checkpoints) + + config = GPT2Config.from_pretrained(args.load_path) + + # Saving the tracker file + tracker_filepath = os.path.join(args.save_path, "latest_checkpointed_iteration.txt") + with open(tracker_filepath, "w") as f: + f.write("release") + + # create `release` dir in args.load_path + release_dir = os.path.join(args.save_path, "release") + os.makedirs(release_dir, exist_ok=True) + + # megatron args + megatron_args = { + "orig_vocab_size": config.vocab_size, + "max_position_embeddings": config.n_positions, + "hidden_size": config.n_embd, + "num_layers": config.n_layer, + "num_attention_heads": config.n_head, + "ffn_hidden_size": config.n_inner, + "tensor_model_parallel_size": args.target_tensor_model_parallel_size, + "pipeline_model_parallel_size": args.target_pipeline_model_parallel_size, + "data_parallel_size": args.target_data_parallel_size, + "make_vocab_size_divisible_by": args.make_vocab_size_divisible_by, + "rank": 0, + "tokenizer_type": None, + } + + if config.activation_function == "gelu": + megatron_args["bias_gelu_fusion"] = False + megatron_args["openai_gelu"] = False + elif config.activation_function == "gelu_fast": + megatron_args["bias_gelu_fusion"] = True + megatron_args["openai_gelu"] = False + elif config.activation_function == "gelu_new": + megatron_args["bias_gelu_fusion"] = False + megatron_args["openai_gelu"] = True + + margs = types.SimpleNamespace() + for k, v in megatron_args.items(): + setattr(margs, k, v) + + # params dtype + if args.target_params_dtype == "fp16": + dtype = torch.float16 + elif args.target_params_dtype == "bf16": + dtype = torch.bfloat16 + else: + dtype = torch.float32 + setattr(margs, "params_dtype", dtype) + + # save dummy optim state dict + dummy_optim_state_dict = {} + dummy_optim_state_dict["optimizer"] = { + "step": 0, + "param_groups": [ + { + "lr": 0.0, + "beta1": 0.0, + "beta2": 0.0, + "eps": 0.0, + "weight_decay": 0.0, + "correct_bias": False, + "params": [], + } + ], + } + if args.use_distributed_optimizer: + for i in range(args.target_pipeline_model_parallel_size): + for j in range(args.target_tensor_model_parallel_size): + for k in range(args.target_data_parallel_size): + if args.target_pipeline_model_parallel_size == 1: + checkpoint_dir = f"mp_rank_{i:02d}_{k:03d}" + else: + checkpoint_dir = f"mp_rank_{i:02d}_{j:03d}_{k:03d}" + checkpoint_dir = os.path.join(release_dir, checkpoint_dir) + os.makedirs(checkpoint_dir, exist_ok=True) + torch.save( + dummy_optim_state_dict, + os.path.join(checkpoint_dir, "optim.pt"), + ) + + # Convert. + print("Converting") + output_state_dict = [] + for i in range(args.target_tensor_model_parallel_size): + output_state_dict.append({}) + + # Embedding layer + print("converting embedding layer") + pos_embedding = state_dict["transformer.wpe.weight"].to(dtype) + word_embedding = state_dict["transformer.wte.weight"].to(dtype) + orig_vocab_size = config.vocab_size + padded_vocab_size = _vocab_size_with_padding(orig_vocab_size, margs) + setattr(margs, "padded_vocab_size", padded_vocab_size) + # Cut out extra padding we don't need + if orig_vocab_size > padded_vocab_size: + full_word_embed = word_embedding[0:padded_vocab_size, :] + # Expanding embedding to larger size by replicating final entry + elif orig_vocab_size < padded_vocab_size: + padding_size = padded_vocab_size - orig_vocab_size + full_word_embed = torch.cat((word_embedding, word_embedding[-1].unsqueeze(0).expand(padding_size, -1))) + # Same size! + else: + full_word_embed = word_embedding + + # Split into new tensor model parallel sizes + out_word_embed = torch.chunk(full_word_embed, args.target_tensor_model_parallel_size, dim=0) + for i in range(args.target_tensor_model_parallel_size): + pos_emb_dict = get_element_from_dict_by_path( + output_state_dict[i], "model.language_model.embedding.position_embeddings" + ) + pos_emb_dict["weight"] = pos_embedding + + word_emb_dict = get_element_from_dict_by_path( + output_state_dict[i], "model.language_model.embedding.word_embeddings" + ) + word_emb_dict["weight"] = out_word_embed[i] + + # Transformer layers + print("converting transformer layers") + if config.num_hidden_layers % args.target_tensor_model_parallel_size != 0: + raise ValueError( + f"Number of layers ({config.num_hidden_layers}) must be divisible by number of tensor parallelism" + f" ({args.target_tensor_model_parallel_size})" + ) + num_layers = config.num_hidden_layers // args.target_pipeline_model_parallel_size + + layer_re = re.compile("transformer.h\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)") + # The number of heads. + heads = config.n_head + # The hidden_size per head. + hidden_size_per_head = config.n_embd // config.n_head + for pp_rank in range(args.target_pipeline_model_parallel_size): + layer_offset = pp_rank * num_layers + if pp_rank > 0: + output_state_dict = [] + for i in range(args.target_tensor_model_parallel_size): + output_state_dict.append({}) + + for layer in range(num_layers): + pp_layer_id = layer + layer_offset + layers_to_copy = [ + layer_name + for layer_name in state_dict.keys() + if layer_name.startswith(f"transformer.h.{pp_layer_id}.") + ] + + for layer_name in layers_to_copy: + m = layer_re.match(layer_name) + # Stop if that's not a layer + if m is None: + break + + # The index of the layer. + _ = int(m.group(1)) + # The name of the operation. + op_name = m.group(2) + # Is it a weight or a bias? + weight_or_bias = m.group(3) + + params = state_dict[layer_name].to(dtype) + # handle layernorm + if op_name.startswith("ln"): + out_name = "input_layernorm" if op_name.endswith("1") else "post_attention_layernorm" + layer_name = f"layers.{layer}.{out_name}.{weight_or_bias}" + + # handle attention K, V, Q weights + elif op_name.startswith("attn.c_attn") and weight_or_bias == "weight": + # transformers stores D X (3*D) but Megatron-LM expects (3*D) X D. + params = params.transpose(0, 1).contiguous() + + params = transformers_to_megatron_fix_query_key_value_ordering( + params, + 3.0, + 3, + heads, + hidden_size_per_head, + ) + layer_name = f"layers.{layer}.self_attention.query_key_value.{weight_or_bias}" + + # handle attention K, V, Q bias + elif op_name.startswith("attn.c_attn") and weight_or_bias == "bias": + params = transformers_to_megatron_fix_query_key_value_ordering( + params, + 3.0, + 3, + heads, + hidden_size_per_head, + ) + layer_name = f"layers.{layer}.self_attention.query_key_value.{weight_or_bias}" + + # handle attention and mlp weights + elif weight_or_bias == "weight": + out_name = transformers_to_megatron.get(op_name, None) + if out_name is None: + continue + params = params.transpose(0, 1) + layer_name = f"layers.{layer}.{out_name}.{weight_or_bias}" + + # handle attention and mlp bias + elif weight_or_bias == "bias": + out_name = transformers_to_megatron.get(op_name, None) + if out_name is None: + continue + layer_name = f"layers.{layer}.{out_name}.{weight_or_bias}" + + # skip + else: + continue + + if op_name + "." + weight_or_bias in tensor_parallel_params: + dim = 1 if op_name in ["attn.c_proj", "mlp.c_proj"] else 0 + params = torch.chunk(params, args.target_tensor_model_parallel_size, dim=dim) + + for i in range(args.target_tensor_model_parallel_size): + params_dict = get_element_from_dict_by_path(output_state_dict[i], "model.language_model.encoder") + params_dict[layer_name] = ( + params[i] if (op_name + "." + weight_or_bias in tensor_parallel_params) else params + ) + + if pp_rank == args.target_pipeline_model_parallel_size - 1: + # handle final layernorm + for weight_or_bias in ["weight", "bias"]: + params = state_dict[f"transformer.ln_f.{weight_or_bias}"].to(dtype) + layer_name = f"final_layernorm.{weight_or_bias}" + for i in range(args.target_tensor_model_parallel_size): + params_dict = get_element_from_dict_by_path(output_state_dict[i], "model.language_model.encoder") + params_dict[layer_name] = params + + # add the LM head + for i in range(args.target_tensor_model_parallel_size): + params_dict = get_element_from_dict_by_path(output_state_dict[i], "model.word_embeddings_for_head") + params_dict["weight"] = out_word_embed[i] + + # saving the state dict as per the tp_rank and pp_rank + for tp_rank in range(args.target_tensor_model_parallel_size): + output_state_dict[tp_rank]["checkpoint_version"] = 3.0 + output_state_dict[tp_rank]["args"] = margs + checkpoint_dir = ( + f"mp_rank_{tp_rank:02d}" + if args.target_pipeline_model_parallel_size == 1 + else f"mp_rank_{tp_rank:02d}_{pp_rank:03d}" + ) + if args.use_distributed_optimizer: + checkpoint_name = "model_rng.pt" + else: + checkpoint_name = "model_optim_rng.pt" + output_state_dict[tp_rank]["optimizer"] = dummy_optim_state_dict["optimizer"] + checkpoint_dir = os.path.join(release_dir, checkpoint_dir) + os.makedirs(checkpoint_dir, exist_ok=True) + checkpoint_path = os.path.join(checkpoint_dir, checkpoint_name) + if args.print_checkpoint_structure: + print( + f"Checkpoint structure of model state dict shard belonging to TP rank {tp_rank} and PP rank" + f" {pp_rank}:" + ) + recursive_print(None, output_state_dict[tp_rank]) + torch.save(output_state_dict[tp_rank], checkpoint_path) + + +def main(): + parser = argparse.ArgumentParser() + parser = add_checkpointing_args(parser) + parser = add_megatron_checkpoint_args(parser) + parser = add_transformers_checkpoint_args(parser) + args = parser.parse_args() + if args.convert_checkpoint_from_megatron_to_transformers: + convert_checkpoint_from_megatron_to_transformers(args) + else: + convert_checkpoint_from_transformers_to_megatron(args) + + +if __name__ == "__main__": + main() From 331ea019d7053924ee4d9d4d30282a2c74c272a6 Mon Sep 17 00:00:00 2001 From: Omar Sanseviero Date: Fri, 7 Oct 2022 15:52:52 +0200 Subject: [PATCH 489/539] Remove unneded words from audio-related feature extractors (#19405) --- .../models/speech_to_text/feature_extraction_speech_to_text.py | 2 +- src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py | 2 +- src/transformers/models/whisper/feature_extraction_whisper.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py b/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py index 4294c48c71f0ee..af605626d0a848 100644 --- a/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py +++ b/src/transformers/models/speech_to_text/feature_extraction_speech_to_text.py @@ -136,7 +136,7 @@ def __call__( **kwargs ) -> BatchFeature: """ - Main method to featurize and prepare for the model one or several sequence(s). sequences. + Main method to featurize and prepare for the model one or several sequence(s). Args: raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`): diff --git a/src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py b/src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py index 14b1d688c9d7a2..b3ceef27d39887 100644 --- a/src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py +++ b/src/transformers/models/wav2vec2/feature_extraction_wav2vec2.py @@ -112,7 +112,7 @@ def __call__( **kwargs ) -> BatchFeature: """ - Main method to featurize and prepare for the model one or several sequence(s). sequences. + Main method to featurize and prepare for the model one or several sequence(s). Args: raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`): diff --git a/src/transformers/models/whisper/feature_extraction_whisper.py b/src/transformers/models/whisper/feature_extraction_whisper.py index ce5de7b65afa81..33d53f150c9dd5 100644 --- a/src/transformers/models/whisper/feature_extraction_whisper.py +++ b/src/transformers/models/whisper/feature_extraction_whisper.py @@ -221,7 +221,7 @@ def __call__( **kwargs ) -> BatchFeature: """ - Main method to featurize and prepare for the model one or several sequence(s). sequences. + Main method to featurize and prepare for the model one or several sequence(s). Args: raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`): From e9a49babeecbcb23db97debd88c42da351822878 Mon Sep 17 00:00:00 2001 From: Amrit Sahu <88420255+sahamrit@users.noreply.github.com> Date: Fri, 7 Oct 2022 19:30:19 +0530 Subject: [PATCH 490/539] [WIP] Add ZeroShotObjectDetectionPipeline (#18445) (#18930) * Add ZeroShotObjectDetectionPipeline (#18445) * Add AutoModelForZeroShotObjectDetection task This commit also adds the following - Add explicit _processor method for ZeroShotObjectDetectionPipeline. This is necessary as pipelines don't auto infer processors yet and `OwlVitProcessor` wraps tokenizer and feature_extractor together, to process multiple images at once - Add auto tests and other tests for ZeroShotObjectDetectionPipeline * Add AutoModelForZeroShotObjectDetection task This commit also adds the following - Add explicit _processor method for ZeroShotObjectDetectionPipeline. This is necessary as pipelines don't auto infer processors yet and `OwlVitProcessor` wraps tokenizer and feature_extractor together, to process multiple images at once - Add auto tests and other tests for ZeroShotObjectDetectionPipeline * Add batching for ZeroShotObjectDetectionPipeline * Fix doc-string ZeroShotObjectDetectionPipeline * Fix output format: ZeroShotObjectDetectionPipeline --- docs/source/en/main_classes/pipelines.mdx | 7 + docs/source/en/model_doc/auto.mdx | 4 + src/transformers/__init__.py | 6 + src/transformers/models/auto/__init__.py | 4 + src/transformers/models/auto/modeling_auto.py | 19 ++ src/transformers/pipelines/__init__.py | 9 + .../pipelines/zero_shot_object_detection.py | 278 ++++++++++++++++++ src/transformers/utils/dummy_pt_objects.py | 10 + ...st_pipelines_zero_shot_object_detection.py | 263 +++++++++++++++++ utils/update_metadata.py | 5 + 10 files changed, 605 insertions(+) create mode 100644 src/transformers/pipelines/zero_shot_object_detection.py create mode 100644 tests/pipelines/test_pipelines_zero_shot_object_detection.py diff --git a/docs/source/en/main_classes/pipelines.mdx b/docs/source/en/main_classes/pipelines.mdx index 4043a00009e22d..5374f1a4003adf 100644 --- a/docs/source/en/main_classes/pipelines.mdx +++ b/docs/source/en/main_classes/pipelines.mdx @@ -43,6 +43,7 @@ There are two categories of pipeline abstractions to be aware about: - [`VisualQuestionAnsweringPipeline`] - [`ZeroShotClassificationPipeline`] - [`ZeroShotImageClassificationPipeline`] + - [`ZeroShotObjectDetectionPipeline`] ## The pipeline abstraction @@ -456,6 +457,12 @@ See [`TokenClassificationPipeline`] for all details. - __call__ - all +### ZeroShotObjectDetectionPipeline + +[[autodoc]] ZeroShotObjectDetectionPipeline + - __call__ + - all + ## Parent class: `Pipeline` [[autodoc]] Pipeline diff --git a/docs/source/en/model_doc/auto.mdx b/docs/source/en/model_doc/auto.mdx index 93976424ba8edd..01db8c4b1f7ba9 100644 --- a/docs/source/en/model_doc/auto.mdx +++ b/docs/source/en/model_doc/auto.mdx @@ -174,6 +174,10 @@ Likewise, if your `NewModel` is a subclass of [`PreTrainedModel`], make sure its [[autodoc]] AutoModelForInstanceSegmentation +## AutoModelForZeroShotObjectDetection + +[[autodoc]] AutoModelForZeroShotObjectDetection + ## TFAutoModel [[autodoc]] TFAutoModel diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 18bfea30a09e57..026ec59eb1763c 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -442,6 +442,7 @@ "VisualQuestionAnsweringPipeline", "ZeroShotClassificationPipeline", "ZeroShotImageClassificationPipeline", + "ZeroShotObjectDetectionPipeline", "pipeline", ], "processing_utils": ["ProcessorMixin"], @@ -878,6 +879,7 @@ "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING", "MODEL_MAPPING", "MODEL_WITH_LM_HEAD_MAPPING", + "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING", "AutoModel", "AutoModelForAudioClassification", "AutoModelForAudioFrameClassification", @@ -905,6 +907,7 @@ "AutoModelForVision2Seq", "AutoModelForVisualQuestionAnswering", "AutoModelWithLMHead", + "AutoModelForZeroShotObjectDetection", ] ) _import_structure["models.bart"].extend( @@ -3407,6 +3410,7 @@ VisualQuestionAnsweringPipeline, ZeroShotClassificationPipeline, ZeroShotImageClassificationPipeline, + ZeroShotObjectDetectionPipeline, pipeline, ) from .processing_utils import ProcessorMixin @@ -3772,6 +3776,7 @@ MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, MODEL_FOR_VISION_2_SEQ_MAPPING, MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, + MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, MODEL_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoModel, @@ -3800,6 +3805,7 @@ AutoModelForVideoClassification, AutoModelForVision2Seq, AutoModelForVisualQuestionAnswering, + AutoModelForZeroShotObjectDetection, AutoModelWithLMHead, ) from .models.bart import ( diff --git a/src/transformers/models/auto/__init__.py b/src/transformers/models/auto/__init__.py index 6129253f14711b..1964c73938f683 100644 --- a/src/transformers/models/auto/__init__.py +++ b/src/transformers/models/auto/__init__.py @@ -69,6 +69,7 @@ "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING", "MODEL_MAPPING", "MODEL_WITH_LM_HEAD_MAPPING", + "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING", "AutoModel", "AutoModelForAudioClassification", "AutoModelForAudioFrameClassification", @@ -96,6 +97,7 @@ "AutoModelForVisualQuestionAnswering", "AutoModelForDocumentQuestionAnswering", "AutoModelWithLMHead", + "AutoModelForZeroShotObjectDetection", ] try: @@ -215,6 +217,7 @@ MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, MODEL_FOR_VISION_2_SEQ_MAPPING, MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, + MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, MODEL_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoModel, @@ -243,6 +246,7 @@ AutoModelForVideoClassification, AutoModelForVision2Seq, AutoModelForVisualQuestionAnswering, + AutoModelForZeroShotObjectDetection, AutoModelWithLMHead, ) diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index 4cf9b58a517bfa..237c98c5bb4ab6 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -472,6 +472,13 @@ ] ) +MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES = OrderedDict( + [ + # Model for Zero Shot Object Detection mapping + ("owlvit", "OwlViTForObjectDetection") + ] +) + MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict( [ # Model for Seq2Seq Causal LM mapping @@ -830,6 +837,9 @@ CONFIG_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES ) MODEL_FOR_OBJECT_DETECTION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES) +MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING = _LazyAutoMapping( + CONFIG_MAPPING_NAMES, MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES +) MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping( CONFIG_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) @@ -1016,6 +1026,15 @@ class AutoModelForObjectDetection(_BaseAutoModelClass): AutoModelForObjectDetection = auto_class_update(AutoModelForObjectDetection, head_doc="object detection") +class AutoModelForZeroShotObjectDetection(_BaseAutoModelClass): + _model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING + + +AutoModelForZeroShotObjectDetection = auto_class_update( + AutoModelForZeroShotObjectDetection, head_doc="zero-shot object detection" +) + + class AutoModelForVideoClassification(_BaseAutoModelClass): _model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING diff --git a/src/transformers/pipelines/__init__.py b/src/transformers/pipelines/__init__.py index da0f3d4d83fcb2..0a878728185acd 100755 --- a/src/transformers/pipelines/__init__.py +++ b/src/transformers/pipelines/__init__.py @@ -72,6 +72,7 @@ from .visual_question_answering import VisualQuestionAnsweringPipeline from .zero_shot_classification import ZeroShotClassificationArgumentHandler, ZeroShotClassificationPipeline from .zero_shot_image_classification import ZeroShotImageClassificationPipeline +from .zero_shot_object_detection import ZeroShotObjectDetectionPipeline if is_tf_available(): @@ -124,6 +125,7 @@ AutoModelForTokenClassification, AutoModelForVision2Seq, AutoModelForVisualQuestionAnswering, + AutoModelForZeroShotObjectDetection, ) if TYPE_CHECKING: from ..modeling_tf_utils import TFPreTrainedModel @@ -335,6 +337,13 @@ "default": {"model": {"pt": ("facebook/detr-resnet-50", "2729413")}}, "type": "image", }, + "zero-shot-object-detection": { + "impl": ZeroShotObjectDetectionPipeline, + "tf": (), + "pt": (AutoModelForZeroShotObjectDetection,) if is_torch_available() else (), + "default": {"model": {"pt": ("google/owlvit-base-patch32", "17740e1")}}, + "type": "multimodal", + }, } NO_FEATURE_EXTRACTOR_TASKS = set() diff --git a/src/transformers/pipelines/zero_shot_object_detection.py b/src/transformers/pipelines/zero_shot_object_detection.py new file mode 100644 index 00000000000000..8c18bd502e6f55 --- /dev/null +++ b/src/transformers/pipelines/zero_shot_object_detection.py @@ -0,0 +1,278 @@ +from typing import Dict, List, Union + +import numpy as np + +from ..tokenization_utils_base import BatchEncoding +from ..utils import ( + add_end_docstrings, + is_tf_available, + is_torch_available, + is_vision_available, + logging, + requires_backends, +) +from .base import PIPELINE_INIT_ARGS, Pipeline + + +if is_vision_available(): + from PIL import Image + + from ..image_utils import load_image + +if is_torch_available(): + import torch + + from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING + +logger = logging.get_logger(__name__) + + +@add_end_docstrings(PIPELINE_INIT_ARGS) +class ZeroShotObjectDetectionPipeline(Pipeline): + """ + Zero shot object detection pipeline using `OwlViTForObjectDetection`. This pipeline predicts bounding boxes of + objects when you provide an image and a set of `candidate_labels`. + + This object detection pipeline can currently be loaded from [`pipeline`] using the following task identifier: + `"zero-shot-object-detection"`. + + See the list of available models on + [huggingface.co/models](https://huggingface.co/models?filter=zero-shot-object-detection). + """ + + def __init__(self, **kwargs): + super().__init__(**kwargs) + + if self.framework == "tf": + raise ValueError(f"The {self.__class__} is only available in PyTorch.") + + requires_backends(self, "vision") + self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING) + + def __call__( + self, + images: Union[str, List[str], "Image.Image", List["Image.Image"]], + text_queries: Union[str, List[str], List[List[str]]] = None, + **kwargs + ): + """ + Detect objects (bounding boxes & classes) in the image(s) passed as inputs. + + Args: + images (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): + The pipeline handles three types of images: + + - A string containing an http url pointing to an image + - A string containing a local path to an image + - An image loaded in PIL directly + + text_queries (`str` or `List[str]` or `List[List[str]]`): Text queries to query the target image with. + If given multiple images, `text_queries` should be provided as a list of lists, where each nested list + contains the text queries for the corresponding image. + + threshold (`float`, *optional*, defaults to 0.1): + The probability necessary to make a prediction. + + top_k (`int`, *optional*, defaults to None): + The number of top predictions that will be returned by the pipeline. If the provided number is `None` + or higher than the number of predictions available, it will default to the number of predictions. + + + Return: + A list of lists containing prediction results, one list per input image. Each list contains dictionaries + with the following keys: + + - **label** (`str`) -- Text query corresponding to the found object. + - **score** (`float`) -- Score corresponding to the object (between 0 and 1). + - **box** (`Dict[str,int]`) -- Bounding box of the detected object in image's original size. It is a + dictionary with `x_min`, `x_max`, `y_min`, `y_max` keys. + """ + if isinstance(text_queries, str) or (isinstance(text_queries, List) and not isinstance(text_queries[0], List)): + if isinstance(images, (str, Image.Image)): + inputs = {"images": images, "text_queries": text_queries} + elif isinstance(images, List): + assert len(images) == 1, "Input text_queries and images must have correspondance" + inputs = {"images": images[0], "text_queries": text_queries} + else: + raise TypeError(f"Innapropriate type of images: {type(images)}") + + elif isinstance(text_queries, str) or (isinstance(text_queries, List) and isinstance(text_queries[0], List)): + if isinstance(images, (Image.Image, str)): + images = [images] + assert len(images) == len(text_queries), "Input text_queries and images must have correspondance" + inputs = {"images": images, "text_queries": text_queries} + else: + """ + Supports the following format + - {"images": images, "text_queries": text_queries} + """ + inputs = images + results = super().__call__(inputs, **kwargs) + return results + + def _sanitize_parameters(self, **kwargs): + postprocess_params = {} + if "threshold" in kwargs: + postprocess_params["threshold"] = kwargs["threshold"] + if "top_k" in kwargs: + postprocess_params["top_k"] = kwargs["top_k"] + return {}, {}, postprocess_params + + def preprocess(self, inputs): + if not isinstance(inputs["images"], List): + inputs["images"] = [inputs["images"]] + images = [load_image(img) for img in inputs["images"]] + text_queries = inputs["text_queries"] + if isinstance(text_queries, str) or isinstance(text_queries[0], str): + text_queries = [text_queries] + + target_sizes = [torch.IntTensor([[img.height, img.width]]) for img in images] + target_sizes = torch.cat(target_sizes) + inputs = self._processor(text=inputs["text_queries"], images=images, return_tensors="pt") + return {"target_sizes": target_sizes, "text_queries": text_queries, **inputs} + + def _forward(self, model_inputs): + target_sizes = model_inputs.pop("target_sizes") + text_queries = model_inputs.pop("text_queries") + outputs = self.model(**model_inputs) + + model_outputs = outputs.__class__({"target_sizes": target_sizes, "text_queries": text_queries, **outputs}) + return model_outputs + + def postprocess(self, model_outputs, threshold=0.1, top_k=None): + texts = model_outputs["text_queries"] + + outputs = self.feature_extractor.post_process( + outputs=model_outputs, target_sizes=model_outputs["target_sizes"] + ) + + results = [] + for i in range(len(outputs)): + keep = outputs[i]["scores"] >= threshold + labels = outputs[i]["labels"][keep].tolist() + scores = outputs[i]["scores"][keep].tolist() + boxes = [self._get_bounding_box(box) for box in outputs[i]["boxes"][keep]] + + result = [ + {"score": score, "label": texts[i][label], "box": box} + for score, label, box in zip(scores, labels, boxes) + ] + + result = sorted(result, key=lambda x: x["score"], reverse=True) + if top_k: + result = result[:top_k] + results.append(result) + + return results + + def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]: + """ + Turns list [xmin, xmax, ymin, ymax] into dict { "xmin": xmin, ... } + + Args: + box (`torch.Tensor`): Tensor containing the coordinates in corners format. + + Returns: + bbox (`Dict[str, int]`): Dict containing the coordinates in corners format. + """ + if self.framework != "pt": + raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.") + xmin, ymin, xmax, ymax = box.int().tolist() + bbox = { + "xmin": xmin, + "ymin": ymin, + "xmax": xmax, + "ymax": ymax, + } + return bbox + + # Replication of OwlViTProcessor __call__ method, since pipelines don't auto infer processor's yet! + def _processor(self, text=None, images=None, padding="max_length", return_tensors="np", **kwargs): + """ + Main method to prepare for the model one or several text(s) and image(s). This method forwards the `text` and + `kwargs` arguments to CLIPTokenizerFast's [`~CLIPTokenizerFast.__call__`] if `text` is not `None` to encode: + the text. To prepare the image(s), this method forwards the `images` and `kwrags` arguments to + CLIPFeatureExtractor's [`~CLIPFeatureExtractor.__call__`] if `images` is not `None`. Please refer to the + doctsring of the above two methods for more information. + + Args: + text (`str`, `List[str]`, `List[List[str]]`): + The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings + (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set + `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). + images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, + `List[torch.Tensor]`): + The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch + tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a + number of channels, H and W are image height and width. + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors of a particular framework. Acceptable values are: + - `'tf'`: Return TensorFlow `tf.constant` objects. + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return NumPy `np.ndarray` objects. + - `'jax'`: Return JAX `jnp.ndarray` objects. + Returns: + [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: + - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. + - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when + `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not + `None`). + - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. + """ + + if text is None and images is None: + raise ValueError("You have to specify at least one text or image. Both cannot be none.") + + if text is not None: + if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)): + encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)] + + elif isinstance(text, List) and isinstance(text[0], List): + encodings = [] + + # Maximum number of queries across batch + max_num_queries = max([len(t) for t in text]) + + # Pad all batch samples to max number of text queries + for t in text: + if len(t) != max_num_queries: + t = t + [" "] * (max_num_queries - len(t)) + + encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs) + encodings.append(encoding) + else: + raise TypeError("Input text should be a string, a list of strings or a nested list of strings") + + if return_tensors == "np": + input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0) + attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0) + + elif return_tensors == "pt" and is_torch_available(): + import torch + + input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0) + attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0) + + elif return_tensors == "tf" and is_tf_available(): + import tensorflow as tf + + input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0) + attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0) + + else: + raise ValueError("Target return tensor type could not be returned") + + encoding = BatchEncoding() + encoding["input_ids"] = input_ids + encoding["attention_mask"] = attention_mask + + if images is not None: + image_features = self.feature_extractor(images, return_tensors=return_tensors, **kwargs) + + if text is not None and images is not None: + encoding["pixel_values"] = image_features.pixel_values + return encoding + elif text is not None: + return encoding + else: + return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors) diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index ef1a6baafabc16..72db36cab91383 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -418,6 +418,9 @@ def load_tf_weights_in_albert(*args, **kwargs): MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING = None +MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING = None + + MODEL_MAPPING = None @@ -606,6 +609,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class AutoModelForZeroShotObjectDetection(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class AutoModelWithLMHead(metaclass=DummyObject): _backends = ["torch"] diff --git a/tests/pipelines/test_pipelines_zero_shot_object_detection.py b/tests/pipelines/test_pipelines_zero_shot_object_detection.py new file mode 100644 index 00000000000000..10b7e799cc52bd --- /dev/null +++ b/tests/pipelines/test_pipelines_zero_shot_object_detection.py @@ -0,0 +1,263 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline +from transformers.testing_utils import ( + is_pipeline_test, + nested_simplify, + require_tf, + require_torch, + require_vision, + slow, +) + +from .test_pipelines_common import ANY, PipelineTestCaseMeta + + +if is_vision_available(): + from PIL import Image +else: + + class Image: + @staticmethod + def open(*args, **kwargs): + pass + + +@require_vision +@require_torch +@is_pipeline_test +class ZeroShotObjectDetectionPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): + + model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING + + def get_test_pipeline(self, model, tokenizer, feature_extractor): + object_detector = pipeline( + "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection" + ) + + examples = [ + { + "images": "./tests/fixtures/tests_samples/COCO/000000039769.png", + "text_queries": ["cat", "remote", "couch"], + } + ] + return object_detector, examples + + def run_pipeline_test(self, object_detector, examples): + batch_outputs = object_detector(examples, threshold=0.0) + + self.assertEqual(len(examples), len(batch_outputs)) + for outputs in batch_outputs: + for output_per_image in outputs: + self.assertGreater(len(output_per_image), 0) + for detected_object in output_per_image: + self.assertEqual( + detected_object, + { + "score": ANY(float), + "label": ANY(str), + "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)}, + }, + ) + + @require_tf + @unittest.skip("Zero Shot Object Detection not implemented in TF") + def test_small_model_tf(self): + pass + + @require_torch + def test_small_model_pt(self): + object_detector = pipeline( + "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection" + ) + + outputs = object_detector( + "./tests/fixtures/tests_samples/COCO/000000039769.png", + text_queries=["cat", "remote", "couch"], + threshold=0.64, + ) + + self.assertEqual( + nested_simplify(outputs, decimals=4), + [ + [ + {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, + {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, + {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, + {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}}, + ] + ], + ) + + outputs = object_detector( + ["./tests/fixtures/tests_samples/COCO/000000039769.png"], + text_queries=["cat", "remote", "couch"], + threshold=0.64, + ) + + self.assertEqual( + nested_simplify(outputs, decimals=4), + [ + [ + {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, + {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, + {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, + {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}}, + ] + ], + ) + + outputs = object_detector( + "./tests/fixtures/tests_samples/COCO/000000039769.png", + text_queries=[["cat", "remote", "couch"]], + threshold=0.64, + ) + + self.assertEqual( + nested_simplify(outputs, decimals=4), + [ + [ + {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, + {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, + {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, + {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}}, + ] + ], + ) + + outputs = object_detector( + [ + "./tests/fixtures/tests_samples/COCO/000000039769.png", + "http://images.cocodataset.org/val2017/000000039769.jpg", + ], + text_queries=[["cat", "remote", "couch"], ["cat", "remote", "couch"]], + threshold=0.64, + ) + + self.assertEqual( + nested_simplify(outputs, decimals=4), + [ + [ + {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, + {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, + {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, + {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}}, + ], + [ + {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, + {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, + {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, + {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}}, + ], + ], + ) + + @require_torch + @slow + def test_large_model_pt(self): + object_detector = pipeline("zero-shot-object-detection") + + outputs = object_detector( + "http://images.cocodataset.org/val2017/000000039769.jpg", text_queries=["cat", "remote", "couch"] + ) + self.assertEqual( + nested_simplify(outputs, decimals=4), + [ + [ + {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, + {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, + {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, + {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, + {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, + ] + ], + ) + + outputs = object_detector( + [ + "http://images.cocodataset.org/val2017/000000039769.jpg", + "http://images.cocodataset.org/val2017/000000039769.jpg", + ], + text_queries=[["cat", "remote", "couch"], ["cat", "remote", "couch"]], + ) + self.assertEqual( + nested_simplify(outputs, decimals=4), + [ + [ + {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, + {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, + {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, + {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, + {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, + ], + [ + {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, + {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, + {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, + {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, + {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, + ], + ], + ) + + @require_tf + @unittest.skip("Zero Shot Object Detection not implemented in TF") + def test_large_model_tf(self): + pass + + @require_torch + @slow + def test_threshold(self): + threshold = 0.2 + object_detector = pipeline("zero-shot-object-detection") + + outputs = object_detector( + "http://images.cocodataset.org/val2017/000000039769.jpg", + text_queries=["cat", "remote", "couch"], + threshold=threshold, + ) + self.assertEqual( + nested_simplify(outputs, decimals=4), + [ + [ + {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, + {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, + {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, + ] + ], + ) + + @require_torch + @slow + def test_top_k(self): + top_k = 2 + object_detector = pipeline("zero-shot-object-detection") + + outputs = object_detector( + "http://images.cocodataset.org/val2017/000000039769.jpg", + text_queries=["cat", "remote", "couch"], + top_k=top_k, + ) + self.assertEqual( + nested_simplify(outputs, decimals=4), + [ + [ + {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, + {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, + ] + ], + ) diff --git a/utils/update_metadata.py b/utils/update_metadata.py index aaf296c0436b08..8bb3b71672d812 100644 --- a/utils/update_metadata.py +++ b/utils/update_metadata.py @@ -58,6 +58,11 @@ ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"), ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"), ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"), + ( + "zero-shot-object-detection", + "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES", + "AutoModelForZeroShotObjectDetection", + ), ("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"), ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"), ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"), From fa4bcd5274125278ce6f97438760b18640b83e62 Mon Sep 17 00:00:00 2001 From: ddobokki <44228269+ddobokki@users.noreply.github.com> Date: Fri, 7 Oct 2022 23:05:48 +0900 Subject: [PATCH 491/539] edit: cast attention_mask to long in DataCollatorCTCWithPadding (#19369) * edit: casting attention_mask to long in DataCollatorCTCWithPadding * edit: casting attention_mask to long in DataCollatorCTCWithPadding --- .../pytorch/speech-recognition/run_speech_recognition_ctc.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py b/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py index 54ea4e17f4545c..904a297c5a86d4 100755 --- a/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py +++ b/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py @@ -317,6 +317,8 @@ def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) batch["labels"] = labels + if "attention_mask" in batch: + batch["attention_mask"] = batch["attention_mask"].to(torch.long) return batch From 5fef17f4907b4cb78685149fa03b1fbb607b3bee Mon Sep 17 00:00:00 2001 From: David Yang Date: Fri, 7 Oct 2022 22:14:00 +0800 Subject: [PATCH 492/539] Copy BertTokenizer dependency into retribert tokenizer (#19371) --- .../retribert/tokenization_retribert.py | 473 +++++++++++++++++- .../retribert/tokenization_retribert_fast.py | 147 +++++- 2 files changed, 614 insertions(+), 6 deletions(-) diff --git a/src/transformers/models/retribert/tokenization_retribert.py b/src/transformers/models/retribert/tokenization_retribert.py index b61c0634406a54..a15cbcdf74bfda 100644 --- a/src/transformers/models/retribert/tokenization_retribert.py +++ b/src/transformers/models/retribert/tokenization_retribert.py @@ -14,8 +14,13 @@ # limitations under the License. """Tokenization classes for RetriBERT.""" +import collections +import os +import unicodedata +from typing import List, Optional, Tuple + +from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...utils import logging -from ..bert.tokenization_bert import BertTokenizer logger = logging.get_logger(__name__) @@ -40,14 +45,70 @@ } -class RetriBertTokenizer(BertTokenizer): +# Copied from transformers.models.bert.tokenization_bert.load_vocab +def load_vocab(vocab_file): + """Loads a vocabulary file into a dictionary.""" + vocab = collections.OrderedDict() + with open(vocab_file, "r", encoding="utf-8") as reader: + tokens = reader.readlines() + for index, token in enumerate(tokens): + token = token.rstrip("\n") + vocab[token] = index + return vocab + + +# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize +def whitespace_tokenize(text): + """Runs basic whitespace cleaning and splitting on a piece of text.""" + text = text.strip() + if not text: + return [] + tokens = text.split() + return tokens + + +class RetriBertTokenizer(PreTrainedTokenizer): + r""" Constructs a RetriBERT tokenizer. [`RetriBertTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation splitting and wordpiece. - Refer to superclass [`BertTokenizer`] for usage examples and documentation concerning parameters. + This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer + to: this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + File containing the vocabulary. + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether or not to lowercase the input when tokenizing. + do_basic_tokenize (`bool`, *optional*, defaults to `True`): + Whether or not to do basic tokenization before WordPiece. + never_split (`Iterable`, *optional*): + Collection of tokens which will never be split during tokenization. Only has an effect when + `do_basic_tokenize=True` + unk_token (`str`, *optional*, defaults to `"[UNK]"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + sep_token (`str`, *optional*, defaults to `"[SEP]"`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + pad_token (`str`, *optional*, defaults to `"[PAD]"`): + The token used for padding, for example when batching sequences of different lengths. + cls_token (`str`, *optional*, defaults to `"[CLS]"`): + The classifier token which is used when doing sequence classification (classification of the whole sequence + instead of per-token classification). It is the first token of the sequence when built with special tokens. + mask_token (`str`, *optional*, defaults to `"[MASK]"`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. + tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): + Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this + [issue](https://github.com/huggingface/transformers/issues/328)). + strip_accents (`bool`, *optional*): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for `lowercase` (as in the original BERT). """ vocab_files_names = VOCAB_FILES_NAMES @@ -55,3 +116,409 @@ class RetriBertTokenizer(BertTokenizer): max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION model_input_names = ["input_ids", "attention_mask"] + + # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.__init__ + def __init__( + self, + vocab_file, + do_lower_case=True, + do_basic_tokenize=True, + never_split=None, + unk_token="[UNK]", + sep_token="[SEP]", + pad_token="[PAD]", + cls_token="[CLS]", + mask_token="[MASK]", + tokenize_chinese_chars=True, + strip_accents=None, + **kwargs + ): + super().__init__( + do_lower_case=do_lower_case, + do_basic_tokenize=do_basic_tokenize, + never_split=never_split, + unk_token=unk_token, + sep_token=sep_token, + pad_token=pad_token, + cls_token=cls_token, + mask_token=mask_token, + tokenize_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + **kwargs, + ) + + if not os.path.isfile(vocab_file): + raise ValueError( + f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained" + " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" + ) + self.vocab = load_vocab(vocab_file) + self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) + self.do_basic_tokenize = do_basic_tokenize + if do_basic_tokenize: + self.basic_tokenizer = BasicTokenizer( + do_lower_case=do_lower_case, + never_split=never_split, + tokenize_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + ) + self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token) + + @property + # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.do_lower_case + def do_lower_case(self): + return self.basic_tokenizer.do_lower_case + + @property + # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.vocab_size + def vocab_size(self): + return len(self.vocab) + + # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_vocab + def get_vocab(self): + return dict(self.vocab, **self.added_tokens_encoder) + + # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._tokenize + def _tokenize(self, text): + split_tokens = [] + if self.do_basic_tokenize: + for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): + + # If the token is part of the never_split set + if token in self.basic_tokenizer.never_split: + split_tokens.append(token) + else: + split_tokens += self.wordpiece_tokenizer.tokenize(token) + else: + split_tokens = self.wordpiece_tokenizer.tokenize(text) + return split_tokens + + # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_token_to_id + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.vocab.get(token, self.vocab.get(self.unk_token)) + + # Copied from transformers.models.bert.tokenization_bert.BertTokenizer._convert_id_to_token + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.ids_to_tokens.get(index, self.unk_token) + + # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.convert_tokens_to_string + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + out_string = " ".join(tokens).replace(" ##", "").strip() + return out_string + + # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.build_inputs_with_special_tokens + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A BERT sequence has the following format: + + - single sequence: `[CLS] X [SEP]` + - pair of sequences: `[CLS] A [SEP] B [SEP]` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. + """ + if token_ids_1 is None: + return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + cls = [self.cls_token_id] + sep = [self.sep_token_id] + return cls + token_ids_0 + sep + token_ids_1 + sep + + # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_special_tokens_mask + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + if token_ids_1 is not None: + return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] + return [1] + ([0] * len(token_ids_0)) + [1] + + # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.create_token_type_ids_from_sequences + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence + pair mask has the following format: + + ``` + 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence | + ``` + + If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + + # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.save_vocabulary + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + index = 0 + if os.path.isdir(save_directory): + vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + else: + vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory + with open(vocab_file, "w", encoding="utf-8") as writer: + for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): + if index != token_index: + logger.warning( + f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." + " Please check that the vocabulary is not corrupted!" + ) + index = token_index + writer.write(token + "\n") + index += 1 + return (vocab_file,) + + +# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer +class BasicTokenizer(object): + """ + Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). + + Args: + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether or not to lowercase the input when tokenizing. + never_split (`Iterable`, *optional*): + Collection of tokens which will never be split during tokenization. Only has an effect when + `do_basic_tokenize=True` + tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): + Whether or not to tokenize Chinese characters. + + This should likely be deactivated for Japanese (see this + [issue](https://github.com/huggingface/transformers/issues/328)). + strip_accents (`bool`, *optional*): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for `lowercase` (as in the original BERT). + """ + + def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None): + if never_split is None: + never_split = [] + self.do_lower_case = do_lower_case + self.never_split = set(never_split) + self.tokenize_chinese_chars = tokenize_chinese_chars + self.strip_accents = strip_accents + + def tokenize(self, text, never_split=None): + """ + Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see + WordPieceTokenizer. + + Args: + never_split (`List[str]`, *optional*) + Kept for backward compatibility purposes. Now implemented directly at the base class level (see + [`PreTrainedTokenizer.tokenize`]) List of token not to split. + """ + # union() returns a new set by concatenating the two sets. + never_split = self.never_split.union(set(never_split)) if never_split else self.never_split + text = self._clean_text(text) + + # This was added on November 1st, 2018 for the multilingual and Chinese + # models. This is also applied to the English models now, but it doesn't + # matter since the English models were not trained on any Chinese data + # and generally don't have any Chinese data in them (there are Chinese + # characters in the vocabulary because Wikipedia does have some Chinese + # words in the English Wikipedia.). + if self.tokenize_chinese_chars: + text = self._tokenize_chinese_chars(text) + orig_tokens = whitespace_tokenize(text) + split_tokens = [] + for token in orig_tokens: + if token not in never_split: + if self.do_lower_case: + token = token.lower() + if self.strip_accents is not False: + token = self._run_strip_accents(token) + elif self.strip_accents: + token = self._run_strip_accents(token) + split_tokens.extend(self._run_split_on_punc(token, never_split)) + + output_tokens = whitespace_tokenize(" ".join(split_tokens)) + return output_tokens + + def _run_strip_accents(self, text): + """Strips accents from a piece of text.""" + text = unicodedata.normalize("NFD", text) + output = [] + for char in text: + cat = unicodedata.category(char) + if cat == "Mn": + continue + output.append(char) + return "".join(output) + + def _run_split_on_punc(self, text, never_split=None): + """Splits punctuation on a piece of text.""" + if never_split is not None and text in never_split: + return [text] + chars = list(text) + i = 0 + start_new_word = True + output = [] + while i < len(chars): + char = chars[i] + if _is_punctuation(char): + output.append([char]) + start_new_word = True + else: + if start_new_word: + output.append([]) + start_new_word = False + output[-1].append(char) + i += 1 + + return ["".join(x) for x in output] + + def _tokenize_chinese_chars(self, text): + """Adds whitespace around any CJK character.""" + output = [] + for char in text: + cp = ord(char) + if self._is_chinese_char(cp): + output.append(" ") + output.append(char) + output.append(" ") + else: + output.append(char) + return "".join(output) + + def _is_chinese_char(self, cp): + """Checks whether CP is the codepoint of a CJK character.""" + # This defines a "chinese character" as anything in the CJK Unicode block: + # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) + # + # Note that the CJK Unicode block is NOT all Japanese and Korean characters, + # despite its name. The modern Korean Hangul alphabet is a different block, + # as is Japanese Hiragana and Katakana. Those alphabets are used to write + # space-separated words, so they are not treated specially and handled + # like the all of the other languages. + if ( + (cp >= 0x4E00 and cp <= 0x9FFF) + or (cp >= 0x3400 and cp <= 0x4DBF) # + or (cp >= 0x20000 and cp <= 0x2A6DF) # + or (cp >= 0x2A700 and cp <= 0x2B73F) # + or (cp >= 0x2B740 and cp <= 0x2B81F) # + or (cp >= 0x2B820 and cp <= 0x2CEAF) # + or (cp >= 0xF900 and cp <= 0xFAFF) + or (cp >= 0x2F800 and cp <= 0x2FA1F) # + ): # + return True + + return False + + def _clean_text(self, text): + """Performs invalid character removal and whitespace cleanup on text.""" + output = [] + for char in text: + cp = ord(char) + if cp == 0 or cp == 0xFFFD or _is_control(char): + continue + if _is_whitespace(char): + output.append(" ") + else: + output.append(char) + return "".join(output) + + +# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer +class WordpieceTokenizer(object): + """Runs WordPiece tokenization.""" + + def __init__(self, vocab, unk_token, max_input_chars_per_word=100): + self.vocab = vocab + self.unk_token = unk_token + self.max_input_chars_per_word = max_input_chars_per_word + + def tokenize(self, text): + """ + Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform + tokenization using the given vocabulary. + + For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`. + + Args: + text: A single token or whitespace separated tokens. This should have + already been passed through *BasicTokenizer*. + + Returns: + A list of wordpiece tokens. + """ + + output_tokens = [] + for token in whitespace_tokenize(text): + chars = list(token) + if len(chars) > self.max_input_chars_per_word: + output_tokens.append(self.unk_token) + continue + + is_bad = False + start = 0 + sub_tokens = [] + while start < len(chars): + end = len(chars) + cur_substr = None + while start < end: + substr = "".join(chars[start:end]) + if start > 0: + substr = "##" + substr + if substr in self.vocab: + cur_substr = substr + break + end -= 1 + if cur_substr is None: + is_bad = True + break + sub_tokens.append(cur_substr) + start = end + + if is_bad: + output_tokens.append(self.unk_token) + else: + output_tokens.extend(sub_tokens) + return output_tokens diff --git a/src/transformers/models/retribert/tokenization_retribert_fast.py b/src/transformers/models/retribert/tokenization_retribert_fast.py index 3451d1224a7a18..2532f839a30090 100644 --- a/src/transformers/models/retribert/tokenization_retribert_fast.py +++ b/src/transformers/models/retribert/tokenization_retribert_fast.py @@ -14,8 +14,13 @@ # limitations under the License. """Tokenization classes for RetriBERT.""" +import json +from typing import List, Optional, Tuple + +from tokenizers import normalizers + +from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging -from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_retribert import RetriBertTokenizer @@ -46,14 +51,47 @@ } -class RetriBertTokenizerFast(BertTokenizerFast): +class RetriBertTokenizerFast(PreTrainedTokenizerFast): r""" Construct a "fast" RetriBERT tokenizer (backed by HuggingFace's *tokenizers* library). [`RetriBertTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization: punctuation splitting and wordpiece. - Refer to superclass [`BertTokenizerFast`] for usage examples and documentation concerning parameters. + This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should + refer to this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + File containing the vocabulary. + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether or not to lowercase the input when tokenizing. + unk_token (`str`, *optional*, defaults to `"[UNK]"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + sep_token (`str`, *optional*, defaults to `"[SEP]"`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + pad_token (`str`, *optional*, defaults to `"[PAD]"`): + The token used for padding, for example when batching sequences of different lengths. + cls_token (`str`, *optional*, defaults to `"[CLS]"`): + The classifier token which is used when doing sequence classification (classification of the whole sequence + instead of per-token classification). It is the first token of the sequence when built with special tokens. + mask_token (`str`, *optional*, defaults to `"[MASK]"`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. + clean_text (`bool`, *optional*, defaults to `True`): + Whether or not to clean the text before tokenization by removing any control characters and replacing all + whitespaces by the classic one. + tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): + Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this + issue](https://github.com/huggingface/transformers/issues/328)). + strip_accents (`bool`, *optional*): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for `lowercase` (as in the original BERT). + wordpieces_prefix (`str`, *optional*, defaults to `"##"`): + The prefix for subwords. """ vocab_files_names = VOCAB_FILES_NAMES @@ -62,3 +100,106 @@ class RetriBertTokenizerFast(BertTokenizerFast): pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION slow_tokenizer_class = RetriBertTokenizer model_input_names = ["input_ids", "attention_mask"] + + # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.__init__ + def __init__( + self, + vocab_file=None, + tokenizer_file=None, + do_lower_case=True, + unk_token="[UNK]", + sep_token="[SEP]", + pad_token="[PAD]", + cls_token="[CLS]", + mask_token="[MASK]", + tokenize_chinese_chars=True, + strip_accents=None, + **kwargs + ): + super().__init__( + vocab_file, + tokenizer_file=tokenizer_file, + do_lower_case=do_lower_case, + unk_token=unk_token, + sep_token=sep_token, + pad_token=pad_token, + cls_token=cls_token, + mask_token=mask_token, + tokenize_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + **kwargs, + ) + + normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__()) + if ( + normalizer_state.get("lowercase", do_lower_case) != do_lower_case + or normalizer_state.get("strip_accents", strip_accents) != strip_accents + or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars + ): + normalizer_class = getattr(normalizers, normalizer_state.pop("type")) + normalizer_state["lowercase"] = do_lower_case + normalizer_state["strip_accents"] = strip_accents + normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars + self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state) + + self.do_lower_case = do_lower_case + + # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.build_inputs_with_special_tokens + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A BERT sequence has the following format: + + - single sequence: `[CLS] X [SEP]` + - pair of sequences: `[CLS] A [SEP] B [SEP]` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. + """ + output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + + if token_ids_1: + output += token_ids_1 + [self.sep_token_id] + + return output + + # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.create_token_type_ids_from_sequences + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence + pair mask has the following format: + + ``` + 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence | + ``` + + If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + + # Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast.save_vocabulary + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + files = self._tokenizer.model.save(save_directory, name=filename_prefix) + return tuple(files) From a26d71d6aed8c8dae31c8ec645a1325d2acb4d6d Mon Sep 17 00:00:00 2001 From: Dean Wyatte <2512762+dwyatte@users.noreply.github.com> Date: Fri, 7 Oct 2022 08:53:03 -0600 Subject: [PATCH 493/539] Export TensorFlow models to ONNX with dynamic input shapes (#19255) * validate onnx models with a different input geometry than saved with * only test working features for now * simpler test skipping * rm TODO * expose batch_size/seq_length on vit * skip certain name, feature, framework parameterizations known to fail validation * Trigger CI * Trigger CI --- .../models/clip/configuration_clip.py | 10 +++++-- .../models/groupvit/configuration_groupvit.py | 10 +++++-- .../models/owlvit/configuration_owlvit.py | 10 +++++-- src/transformers/onnx/convert.py | 20 ++++++++++--- tests/onnx/test_onnx_v2.py | 28 +++++++++++++++---- 5 files changed, 63 insertions(+), 15 deletions(-) diff --git a/src/transformers/models/clip/configuration_clip.py b/src/transformers/models/clip/configuration_clip.py index f70d6a6064c7cc..b6066f81b21156 100644 --- a/src/transformers/models/clip/configuration_clip.py +++ b/src/transformers/models/clip/configuration_clip.py @@ -355,11 +355,17 @@ def atol_for_validation(self) -> float: def generate_dummy_inputs( self, processor: "ProcessorMixin", + batch_size: int = -1, + seq_length: int = -1, framework: Optional["TensorType"] = None, ) -> Mapping[str, Any]: - text_input_dict = super().generate_dummy_inputs(processor.tokenizer, framework=framework) - image_input_dict = super().generate_dummy_inputs(processor.feature_extractor, framework=framework) + text_input_dict = super().generate_dummy_inputs( + processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework + ) + image_input_dict = super().generate_dummy_inputs( + processor.feature_extractor, batch_size=batch_size, framework=framework + ) return {**text_input_dict, **image_input_dict} @property diff --git a/src/transformers/models/groupvit/configuration_groupvit.py b/src/transformers/models/groupvit/configuration_groupvit.py index ea428224105133..efb37dbce8223f 100644 --- a/src/transformers/models/groupvit/configuration_groupvit.py +++ b/src/transformers/models/groupvit/configuration_groupvit.py @@ -381,11 +381,17 @@ def atol_for_validation(self) -> float: def generate_dummy_inputs( self, processor: "ProcessorMixin", + batch_size: int = -1, + seq_length: int = -1, framework: Optional["TensorType"] = None, ) -> Mapping[str, Any]: - text_input_dict = super().generate_dummy_inputs(processor.tokenizer, framework=framework) - image_input_dict = super().generate_dummy_inputs(processor.feature_extractor, framework=framework) + text_input_dict = super().generate_dummy_inputs( + processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework + ) + image_input_dict = super().generate_dummy_inputs( + processor.feature_extractor, batch_size=batch_size, framework=framework + ) return {**text_input_dict, **image_input_dict} @property diff --git a/src/transformers/models/owlvit/configuration_owlvit.py b/src/transformers/models/owlvit/configuration_owlvit.py index ff0bd6e6120d05..1725c4dd84a3bb 100644 --- a/src/transformers/models/owlvit/configuration_owlvit.py +++ b/src/transformers/models/owlvit/configuration_owlvit.py @@ -372,11 +372,17 @@ def atol_for_validation(self) -> float: def generate_dummy_inputs( self, processor: "ProcessorMixin", + batch_size: int = -1, + seq_length: int = -1, framework: Optional["TensorType"] = None, ) -> Mapping[str, Any]: - text_input_dict = super().generate_dummy_inputs(processor.tokenizer, framework=framework) - image_input_dict = super().generate_dummy_inputs(processor.feature_extractor, framework=framework) + text_input_dict = super().generate_dummy_inputs( + processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework + ) + image_input_dict = super().generate_dummy_inputs( + processor.feature_extractor, batch_size=batch_size, framework=framework + ) return {**text_input_dict, **image_input_dict} @property diff --git a/src/transformers/onnx/convert.py b/src/transformers/onnx/convert.py index a896b76a1cca4e..2da00e2b0dea6c 100644 --- a/src/transformers/onnx/convert.py +++ b/src/transformers/onnx/convert.py @@ -262,7 +262,9 @@ def export_tensorflow( inputs_match, matched_inputs = ensure_model_and_config_inputs_match(model, model_inputs.keys()) onnx_outputs = list(config.outputs.keys()) - input_signature = [tf.TensorSpec.from_tensor(tensor, name=key) for key, tensor in model_inputs.items()] + input_signature = [ + tf.TensorSpec([None] * tensor.ndim, dtype=tensor.dtype, name=key) for key, tensor in model_inputs.items() + ] onnx_model, _ = tf2onnx.convert.from_keras(model, input_signature, opset=opset) onnx.save(onnx_model, output.as_posix()) config.restore_ops() @@ -363,12 +365,22 @@ def validate_model_outputs( logger.info("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.") preprocessor = tokenizer - # TODO: generate inputs with a different batch_size and seq_len that was used for conversion to properly test + # generate inputs with a different batch_size and seq_len that was used for conversion to properly test # dynamic input shapes. if is_torch_available() and issubclass(type(reference_model), PreTrainedModel): - reference_model_inputs = config.generate_dummy_inputs(preprocessor, framework=TensorType.PYTORCH) + reference_model_inputs = config.generate_dummy_inputs( + preprocessor, + batch_size=config.default_fixed_batch + 1, + seq_length=config.default_fixed_sequence + 1, + framework=TensorType.PYTORCH, + ) else: - reference_model_inputs = config.generate_dummy_inputs(preprocessor, framework=TensorType.TENSORFLOW) + reference_model_inputs = config.generate_dummy_inputs( + preprocessor, + batch_size=config.default_fixed_batch + 1, + seq_length=config.default_fixed_sequence + 1, + framework=TensorType.TENSORFLOW, + ) # Create ONNX Runtime session options = SessionOptions() diff --git a/tests/onnx/test_onnx_v2.py b/tests/onnx/test_onnx_v2.py index dac4a25803c04f..4c15a68867895c 100644 --- a/tests/onnx/test_onnx_v2.py +++ b/tests/onnx/test_onnx_v2.py @@ -284,10 +284,12 @@ class OnnxExportTestCaseV2(TestCase): Integration tests ensuring supported models are correctly exported """ - def _onnx_export(self, test_name, name, model_name, feature, onnx_config_class_constructor, device="cpu"): + def _onnx_export( + self, test_name, name, model_name, feature, onnx_config_class_constructor, device="cpu", framework="pt" + ): from transformers.onnx import export - model_class = FeaturesManager.get_model_class_for_feature(feature) + model_class = FeaturesManager.get_model_class_for_feature(feature, framework=framework) config = AutoConfig.from_pretrained(model_name) model = model_class.from_config(config) @@ -296,6 +298,22 @@ def _onnx_export(self, test_name, name, model_name, feature, onnx_config_class_c if model.__class__.__name__.startswith("Yolos") and device != "cpu": return + # ONNX inference fails with the following name, feature, framework parameterizations + # See: https://github.com/huggingface/transformers/issues/19357 + if (name, feature, framework) in { + ("deberta-v2", "question-answering", "pt"), + ("deberta-v2", "multiple-choice", "pt"), + ("roformer", "multiple-choice", "pt"), + ("groupvit", "default", "pt"), + ("perceiver", "masked-lm", "pt"), + ("perceiver", "sequence-classification", "pt"), + ("perceiver", "image-classification", "pt"), + ("bert", "multiple-choice", "tf"), + ("camembert", "multiple-choice", "tf"), + ("roberta", "multiple-choice", "tf"), + }: + return + onnx_config = onnx_config_class_constructor(model.config) if is_torch_available(): @@ -364,13 +382,13 @@ def test_pytorch_export_seq2seq_with_past( @require_tf @require_vision def test_tensorflow_export(self, test_name, name, model_name, feature, onnx_config_class_constructor): - self._onnx_export(test_name, name, model_name, feature, onnx_config_class_constructor) + self._onnx_export(test_name, name, model_name, feature, onnx_config_class_constructor, framework="tf") @parameterized.expand(_get_models_to_test(TENSORFLOW_EXPORT_WITH_PAST_MODELS), skip_on_empty=True) @slow @require_tf def test_tensorflow_export_with_past(self, test_name, name, model_name, feature, onnx_config_class_constructor): - self._onnx_export(test_name, name, model_name, feature, onnx_config_class_constructor) + self._onnx_export(test_name, name, model_name, feature, onnx_config_class_constructor, framework="tf") @parameterized.expand(_get_models_to_test(TENSORFLOW_EXPORT_SEQ2SEQ_WITH_PAST_MODELS), skip_on_empty=True) @slow @@ -378,7 +396,7 @@ def test_tensorflow_export_with_past(self, test_name, name, model_name, feature, def test_tensorflow_export_seq2seq_with_past( self, test_name, name, model_name, feature, onnx_config_class_constructor ): - self._onnx_export(test_name, name, model_name, feature, onnx_config_class_constructor) + self._onnx_export(test_name, name, model_name, feature, onnx_config_class_constructor, framework="tf") class StableDropoutTestCase(TestCase): From 994b7a4eea02d493327b08b71b99e2dbd991ce73 Mon Sep 17 00:00:00 2001 From: Arthur <48595927+ArthurZucker@users.noreply.github.com> Date: Fri, 7 Oct 2022 16:54:08 +0200 Subject: [PATCH 494/539] update attention mask handling (#19385) * update feature extractor params * update attention mask handling --- .../whisper/feature_extraction_whisper.py | 17 +++++++++++++- .../pipelines/automatic_speech_recognition.py | 23 ++++++++++++------- 2 files changed, 31 insertions(+), 9 deletions(-) diff --git a/src/transformers/models/whisper/feature_extraction_whisper.py b/src/transformers/models/whisper/feature_extraction_whisper.py index 33d53f150c9dd5..0d6bbd9ed18bbf 100644 --- a/src/transformers/models/whisper/feature_extraction_whisper.py +++ b/src/transformers/models/whisper/feature_extraction_whisper.py @@ -218,6 +218,7 @@ def __call__( return_attention_mask: Optional[bool] = None, padding: Optional[str] = "max_length", max_length: Optional[int] = None, + sampling_rate: Optional[int] = None, **kwargs ) -> BatchFeature: """ @@ -255,11 +256,25 @@ def __call__( - `'np'`: Return Numpy `np.ndarray` objects. sampling_rate (`int`, *optional*): The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass - `sampling_rate` at the forward call to prevent silent errors. + `sampling_rate` at the forward call to prevent silent errors and allow automatic speech recognition + pipeline. padding_value (`float`, defaults to 0.0): The value that is used to fill the padding values / vectors. """ + if sampling_rate is not None: + if sampling_rate != self.sampling_rate: + raise ValueError( + f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" + f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with" + f" {self.sampling_rate} and not {sampling_rate}." + ) + else: + logger.warning( + "It is strongly recommended to pass the `sampling_rate` argument to this function. " + "Failing to do so can result in silent errors that might be hard to debug." + ) + is_batched = bool( isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], np.ndarray) or isinstance(raw_speech[0], (tuple, list))) diff --git a/src/transformers/pipelines/automatic_speech_recognition.py b/src/transformers/pipelines/automatic_speech_recognition.py index c52b1002cf7133..1d5546edb5e6f9 100644 --- a/src/transformers/pipelines/automatic_speech_recognition.py +++ b/src/transformers/pipelines/automatic_speech_recognition.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import inspect from collections import defaultdict from typing import TYPE_CHECKING, Dict, Optional, Union @@ -259,9 +260,9 @@ def preprocess(self, inputs, chunk_length_s=0, stride_length_s=None): # Currently chunking is not possible at this level for `seq2seq` so # it's ok. align_to = self.model.config.inputs_to_logits_ratio - chunk_len = int(round(chunk_length_s * self.feature_extractor.sampling_rate / align_to)) * align_to - stride_left = int(round(stride_length_s[0] * self.feature_extractor.sampling_rate / align_to)) * align_to - stride_right = int(round(stride_length_s[1] * self.feature_extractor.sampling_rate / align_to)) * align_to + chunk_len = int(round(chunk_length_s * self.feature_extractor.sampling_rate / align_to) * align_to) + stride_left = int(round(stride_length_s[0] * self.feature_extractor.sampling_rate / align_to) * align_to) + stride_right = int(round(stride_length_s[1] * self.feature_extractor.sampling_rate / align_to) * align_to) if self.type not in {"ctc", "ctc_with_lm"}: raise ValueError( @@ -304,12 +305,18 @@ def _forward(self, model_inputs): f"`input_features` or `input_values` key, but only has {model_inputs.keys()}" ) - attention_mask = model_inputs.pop("attention_mask", None) - tokens = self.model.generate( - encoder_outputs=encoder(inputs, attention_mask=attention_mask), - attention_mask=attention_mask, - ) + accepts_attention_mask = "attention_mask" in set(inspect.signature(encoder.forward).parameters.keys()) + if accepts_attention_mask: + attention_mask = model_inputs.pop("attention_mask", None) + tokens = self.model.generate( + encoder_outputs=encoder(inputs, attention_mask=attention_mask), + attention_mask=attention_mask, + ) + else: + tokens = self.model.generate(inputs) + out = {"tokens": tokens} + else: stride = model_inputs.pop("stride", None) input_values = model_inputs.pop("input_values") From e6fc2016ad5b52efa09df14cbb43e3c69103afb4 Mon Sep 17 00:00:00 2001 From: Ryan Chan Date: Fri, 7 Oct 2022 16:32:55 +0100 Subject: [PATCH 495/539] Remove dependency of Bert from Squeezebert tokenizer (#19403) * Remove dependency of Bert from Squeezebert tokenizer * run style corrections * update copies from BertTokenizers * Update changes and style to Squeezebert files * update copies for bert-fast --- .../squeezebert/tokenization_squeezebert.py | 467 +++++++++++++++++- .../tokenization_squeezebert_fast.py | 149 +++++- 2 files changed, 602 insertions(+), 14 deletions(-) diff --git a/src/transformers/models/squeezebert/tokenization_squeezebert.py b/src/transformers/models/squeezebert/tokenization_squeezebert.py index 72d927eccafb59..00d450058238fd 100644 --- a/src/transformers/models/squeezebert/tokenization_squeezebert.py +++ b/src/transformers/models/squeezebert/tokenization_squeezebert.py @@ -14,8 +14,13 @@ # limitations under the License. """Tokenization classes for SqueezeBERT.""" +import collections +import os +import unicodedata +from typing import List, Optional, Tuple + +from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...utils import logging -from ..bert.tokenization_bert import BertTokenizer logger = logging.get_logger(__name__) @@ -48,17 +53,465 @@ } -class SqueezeBertTokenizer(BertTokenizer): +# Copied from transformers.models.bert.tokenization_bert.load_vocab +def load_vocab(vocab_file): + """Loads a vocabulary file into a dictionary.""" + vocab = collections.OrderedDict() + with open(vocab_file, "r", encoding="utf-8") as reader: + tokens = reader.readlines() + for index, token in enumerate(tokens): + token = token.rstrip("\n") + vocab[token] = index + return vocab + + +# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize +def whitespace_tokenize(text): + """Runs basic whitespace cleaning and splitting on a piece of text.""" + text = text.strip() + if not text: + return [] + tokens = text.split() + return tokens + + +# Copied from transformers.models.bert.tokenization_bert.BertTokenizer with Bert->SqueezeBert,BERT->SqueezeBERT +class SqueezeBertTokenizer(PreTrainedTokenizer): r""" - Constructs a SqueezeBert tokenizer. + Construct a SqueezeBERT tokenizer. Based on WordPiece. + + This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to + this superclass for more information regarding those methods. - [`SqueezeBertTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation splitting - + wordpiece. + Args: + vocab_file (`str`): + File containing the vocabulary. + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether or not to lowercase the input when tokenizing. + do_basic_tokenize (`bool`, *optional*, defaults to `True`): + Whether or not to do basic tokenization before WordPiece. + never_split (`Iterable`, *optional*): + Collection of tokens which will never be split during tokenization. Only has an effect when + `do_basic_tokenize=True` + unk_token (`str`, *optional*, defaults to `"[UNK]"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + sep_token (`str`, *optional*, defaults to `"[SEP]"`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + pad_token (`str`, *optional*, defaults to `"[PAD]"`): + The token used for padding, for example when batching sequences of different lengths. + cls_token (`str`, *optional*, defaults to `"[CLS]"`): + The classifier token which is used when doing sequence classification (classification of the whole sequence + instead of per-token classification). It is the first token of the sequence when built with special tokens. + mask_token (`str`, *optional*, defaults to `"[MASK]"`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. + tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): + Whether or not to tokenize Chinese characters. - Refer to superclass [`BertTokenizer`] for usage examples and documentation concerning parameters. + This should likely be deactivated for Japanese (see this + [issue](https://github.com/huggingface/transformers/issues/328)). + strip_accents (`bool`, *optional*): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for `lowercase` (as in the original SqueezeBERT). """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP - max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + + def __init__( + self, + vocab_file, + do_lower_case=True, + do_basic_tokenize=True, + never_split=None, + unk_token="[UNK]", + sep_token="[SEP]", + pad_token="[PAD]", + cls_token="[CLS]", + mask_token="[MASK]", + tokenize_chinese_chars=True, + strip_accents=None, + **kwargs + ): + super().__init__( + do_lower_case=do_lower_case, + do_basic_tokenize=do_basic_tokenize, + never_split=never_split, + unk_token=unk_token, + sep_token=sep_token, + pad_token=pad_token, + cls_token=cls_token, + mask_token=mask_token, + tokenize_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + **kwargs, + ) + + if not os.path.isfile(vocab_file): + raise ValueError( + f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained" + " model use `tokenizer = SqueezeBertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" + ) + self.vocab = load_vocab(vocab_file) + self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) + self.do_basic_tokenize = do_basic_tokenize + if do_basic_tokenize: + self.basic_tokenizer = BasicTokenizer( + do_lower_case=do_lower_case, + never_split=never_split, + tokenize_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + ) + self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token) + + @property + def do_lower_case(self): + return self.basic_tokenizer.do_lower_case + + @property + def vocab_size(self): + return len(self.vocab) + + def get_vocab(self): + return dict(self.vocab, **self.added_tokens_encoder) + + def _tokenize(self, text): + split_tokens = [] + if self.do_basic_tokenize: + for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): + + # If the token is part of the never_split set + if token in self.basic_tokenizer.never_split: + split_tokens.append(token) + else: + split_tokens += self.wordpiece_tokenizer.tokenize(token) + else: + split_tokens = self.wordpiece_tokenizer.tokenize(text) + return split_tokens + + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.vocab.get(token, self.vocab.get(self.unk_token)) + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.ids_to_tokens.get(index, self.unk_token) + + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + out_string = " ".join(tokens).replace(" ##", "").strip() + return out_string + + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A SqueezeBERT sequence has the following format: + + - single sequence: `[CLS] X [SEP]` + - pair of sequences: `[CLS] A [SEP] B [SEP]` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. + """ + if token_ids_1 is None: + return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + cls = [self.cls_token_id] + sep = [self.sep_token_id] + return cls + token_ids_0 + sep + token_ids_1 + sep + + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + if token_ids_1 is not None: + return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] + return [1] + ([0] * len(token_ids_0)) + [1] + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. A SqueezeBERT + sequence pair mask has the following format: + + ``` + 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence | + ``` + + If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + index = 0 + if os.path.isdir(save_directory): + vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + else: + vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory + with open(vocab_file, "w", encoding="utf-8") as writer: + for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): + if index != token_index: + logger.warning( + f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." + " Please check that the vocabulary is not corrupted!" + ) + index = token_index + writer.write(token + "\n") + index += 1 + return (vocab_file,) + + +# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer +class BasicTokenizer(object): + """ + Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). + + Args: + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether or not to lowercase the input when tokenizing. + never_split (`Iterable`, *optional*): + Collection of tokens which will never be split during tokenization. Only has an effect when + `do_basic_tokenize=True` + tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): + Whether or not to tokenize Chinese characters. + + This should likely be deactivated for Japanese (see this + [issue](https://github.com/huggingface/transformers/issues/328)). + strip_accents (`bool`, *optional*): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for `lowercase` (as in the original BERT). + """ + + def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None): + if never_split is None: + never_split = [] + self.do_lower_case = do_lower_case + self.never_split = set(never_split) + self.tokenize_chinese_chars = tokenize_chinese_chars + self.strip_accents = strip_accents + + def tokenize(self, text, never_split=None): + """ + Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see + WordPieceTokenizer. + + Args: + never_split (`List[str]`, *optional*) + Kept for backward compatibility purposes. Now implemented directly at the base class level (see + [`PreTrainedTokenizer.tokenize`]) List of token not to split. + """ + # union() returns a new set by concatenating the two sets. + never_split = self.never_split.union(set(never_split)) if never_split else self.never_split + text = self._clean_text(text) + + # This was added on November 1st, 2018 for the multilingual and Chinese + # models. This is also applied to the English models now, but it doesn't + # matter since the English models were not trained on any Chinese data + # and generally don't have any Chinese data in them (there are Chinese + # characters in the vocabulary because Wikipedia does have some Chinese + # words in the English Wikipedia.). + if self.tokenize_chinese_chars: + text = self._tokenize_chinese_chars(text) + orig_tokens = whitespace_tokenize(text) + split_tokens = [] + for token in orig_tokens: + if token not in never_split: + if self.do_lower_case: + token = token.lower() + if self.strip_accents is not False: + token = self._run_strip_accents(token) + elif self.strip_accents: + token = self._run_strip_accents(token) + split_tokens.extend(self._run_split_on_punc(token, never_split)) + + output_tokens = whitespace_tokenize(" ".join(split_tokens)) + return output_tokens + + def _run_strip_accents(self, text): + """Strips accents from a piece of text.""" + text = unicodedata.normalize("NFD", text) + output = [] + for char in text: + cat = unicodedata.category(char) + if cat == "Mn": + continue + output.append(char) + return "".join(output) + + def _run_split_on_punc(self, text, never_split=None): + """Splits punctuation on a piece of text.""" + if never_split is not None and text in never_split: + return [text] + chars = list(text) + i = 0 + start_new_word = True + output = [] + while i < len(chars): + char = chars[i] + if _is_punctuation(char): + output.append([char]) + start_new_word = True + else: + if start_new_word: + output.append([]) + start_new_word = False + output[-1].append(char) + i += 1 + + return ["".join(x) for x in output] + + def _tokenize_chinese_chars(self, text): + """Adds whitespace around any CJK character.""" + output = [] + for char in text: + cp = ord(char) + if self._is_chinese_char(cp): + output.append(" ") + output.append(char) + output.append(" ") + else: + output.append(char) + return "".join(output) + + def _is_chinese_char(self, cp): + """Checks whether CP is the codepoint of a CJK character.""" + # This defines a "chinese character" as anything in the CJK Unicode block: + # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) + # + # Note that the CJK Unicode block is NOT all Japanese and Korean characters, + # despite its name. The modern Korean Hangul alphabet is a different block, + # as is Japanese Hiragana and Katakana. Those alphabets are used to write + # space-separated words, so they are not treated specially and handled + # like the all of the other languages. + if ( + (cp >= 0x4E00 and cp <= 0x9FFF) + or (cp >= 0x3400 and cp <= 0x4DBF) # + or (cp >= 0x20000 and cp <= 0x2A6DF) # + or (cp >= 0x2A700 and cp <= 0x2B73F) # + or (cp >= 0x2B740 and cp <= 0x2B81F) # + or (cp >= 0x2B820 and cp <= 0x2CEAF) # + or (cp >= 0xF900 and cp <= 0xFAFF) + or (cp >= 0x2F800 and cp <= 0x2FA1F) # + ): # + return True + + return False + + def _clean_text(self, text): + """Performs invalid character removal and whitespace cleanup on text.""" + output = [] + for char in text: + cp = ord(char) + if cp == 0 or cp == 0xFFFD or _is_control(char): + continue + if _is_whitespace(char): + output.append(" ") + else: + output.append(char) + return "".join(output) + + +class WordpieceTokenizer(object): + """Runs WordPiece tokenization.""" + + def __init__(self, vocab, unk_token, max_input_chars_per_word=100): + self.vocab = vocab + self.unk_token = unk_token + self.max_input_chars_per_word = max_input_chars_per_word + + def tokenize(self, text): + """ + Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform + tokenization using the given vocabulary. + + For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`. + + Args: + text: A single token or whitespace separated tokens. This should have + already been passed through *BasicTokenizer*. + + Returns: + A list of wordpiece tokens. + """ + + output_tokens = [] + for token in whitespace_tokenize(text): + chars = list(token) + if len(chars) > self.max_input_chars_per_word: + output_tokens.append(self.unk_token) + continue + + is_bad = False + start = 0 + sub_tokens = [] + while start < len(chars): + end = len(chars) + cur_substr = None + while start < end: + substr = "".join(chars[start:end]) + if start > 0: + substr = "##" + substr + if substr in self.vocab: + cur_substr = substr + break + end -= 1 + if cur_substr is None: + is_bad = True + break + sub_tokens.append(cur_substr) + start = end + + if is_bad: + output_tokens.append(self.unk_token) + else: + output_tokens.extend(sub_tokens) + return output_tokens diff --git a/src/transformers/models/squeezebert/tokenization_squeezebert_fast.py b/src/transformers/models/squeezebert/tokenization_squeezebert_fast.py index 5ee656e5a8d5e4..0423c16fc33125 100644 --- a/src/transformers/models/squeezebert/tokenization_squeezebert_fast.py +++ b/src/transformers/models/squeezebert/tokenization_squeezebert_fast.py @@ -14,8 +14,13 @@ # limitations under the License. """Tokenization classes for SqueezeBERT.""" +import json +from typing import List, Optional, Tuple + +from tokenizers import normalizers + +from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging -from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_squeezebert import SqueezeBertTokenizer @@ -60,18 +65,148 @@ } -class SqueezeBertTokenizerFast(BertTokenizerFast): +# Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast with Bert->SqueezeBert,BERT->SqueezeBERT +class SqueezeBertTokenizerFast(PreTrainedTokenizerFast): r""" - Constructs a "Fast" SqueezeBert tokenizer (backed by HuggingFace's *tokenizers* library). + Construct a "fast" SqueezeBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece. - [`SqueezeBertTokenizerFast`] is identical to [`BertTokenizerFast`] and runs end-to-end tokenization: punctuation - splitting + wordpiece. + This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should + refer to this superclass for more information regarding those methods. - Refer to superclass [`BertTokenizerFast`] for usage examples and documentation concerning parameters. + Args: + vocab_file (`str`): + File containing the vocabulary. + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether or not to lowercase the input when tokenizing. + unk_token (`str`, *optional*, defaults to `"[UNK]"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + sep_token (`str`, *optional*, defaults to `"[SEP]"`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + pad_token (`str`, *optional*, defaults to `"[PAD]"`): + The token used for padding, for example when batching sequences of different lengths. + cls_token (`str`, *optional*, defaults to `"[CLS]"`): + The classifier token which is used when doing sequence classification (classification of the whole sequence + instead of per-token classification). It is the first token of the sequence when built with special tokens. + mask_token (`str`, *optional*, defaults to `"[MASK]"`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. + clean_text (`bool`, *optional*, defaults to `True`): + Whether or not to clean the text before tokenization by removing any control characters and replacing all + whitespaces by the classic one. + tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): + Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this + issue](https://github.com/huggingface/transformers/issues/328)). + strip_accents (`bool`, *optional*): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for `lowercase` (as in the original SqueezeBERT). + wordpieces_prefix (`str`, *optional*, defaults to `"##"`): + The prefix for subwords. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP - max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES slow_tokenizer_class = SqueezeBertTokenizer + + def __init__( + self, + vocab_file=None, + tokenizer_file=None, + do_lower_case=True, + unk_token="[UNK]", + sep_token="[SEP]", + pad_token="[PAD]", + cls_token="[CLS]", + mask_token="[MASK]", + tokenize_chinese_chars=True, + strip_accents=None, + **kwargs + ): + super().__init__( + vocab_file, + tokenizer_file=tokenizer_file, + do_lower_case=do_lower_case, + unk_token=unk_token, + sep_token=sep_token, + pad_token=pad_token, + cls_token=cls_token, + mask_token=mask_token, + tokenize_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + **kwargs, + ) + + normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__()) + if ( + normalizer_state.get("lowercase", do_lower_case) != do_lower_case + or normalizer_state.get("strip_accents", strip_accents) != strip_accents + or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars + ): + normalizer_class = getattr(normalizers, normalizer_state.pop("type")) + normalizer_state["lowercase"] = do_lower_case + normalizer_state["strip_accents"] = strip_accents + normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars + self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state) + + self.do_lower_case = do_lower_case + + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A SqueezeBERT sequence has the following format: + + - single sequence: `[CLS] X [SEP]` + - pair of sequences: `[CLS] A [SEP] B [SEP]` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. + """ + output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + + if token_ids_1: + output += token_ids_1 + [self.sep_token_id] + + return output + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. A SqueezeBERT + sequence pair mask has the following format: + + ``` + 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence | + ``` + + If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + files = self._tokenizer.model.save(save_directory, name=filename_prefix) + return tuple(files) From c2b83d540e94c609b5ede3fbf6f374a577a747b7 Mon Sep 17 00:00:00 2001 From: harry7337 <75776208+harry7337@users.noreply.github.com> Date: Fri, 7 Oct 2022 21:19:09 +0530 Subject: [PATCH 496/539] Removed Bert and XML Dependency from Herbert (#19410) Co-authored-by: harry7337 --- .../models/herbert/tokenization_herbert.py | 562 +++++++++++++++++- 1 file changed, 554 insertions(+), 8 deletions(-) diff --git a/src/transformers/models/herbert/tokenization_herbert.py b/src/transformers/models/herbert/tokenization_herbert.py index bd301ed7fe8fb7..479446787a3fe2 100644 --- a/src/transformers/models/herbert/tokenization_herbert.py +++ b/src/transformers/models/herbert/tokenization_herbert.py @@ -12,10 +12,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import json +import os +import re +import unicodedata +from typing import List, Optional, Tuple +from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace from ...utils import logging -from ..bert.tokenization_bert import BasicTokenizer -from ..xlm.tokenization_xlm import XLMTokenizer logger = logging.get_logger(__name__) @@ -38,7 +42,239 @@ PRETRAINED_INIT_CONFIGURATION = {} -class HerbertTokenizer(XLMTokenizer): +# Copied from transformers.models.xlm.tokenization_xlm.get_pairs +def get_pairs(word): + """ + Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length + strings) + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +# Copied from transformers.models.xlm.tokenization_xlm.replace_unicode_punct +def replace_unicode_punct(text): + """ + Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl + """ + text = text.replace(",", ",") + text = re.sub(r"。\s*", ". ", text) + text = text.replace("、", ",") + text = text.replace("”", '"') + text = text.replace("“", '"') + text = text.replace("∶", ":") + text = text.replace(":", ":") + text = text.replace("?", "?") + text = text.replace("《", '"') + text = text.replace("》", '"') + text = text.replace(")", ")") + text = text.replace("!", "!") + text = text.replace("(", "(") + text = text.replace(";", ";") + text = text.replace("1", "1") + text = text.replace("」", '"') + text = text.replace("「", '"') + text = text.replace("0", "0") + text = text.replace("3", "3") + text = text.replace("2", "2") + text = text.replace("5", "5") + text = text.replace("6", "6") + text = text.replace("9", "9") + text = text.replace("7", "7") + text = text.replace("8", "8") + text = text.replace("4", "4") + text = re.sub(r".\s*", ". ", text) + text = text.replace("~", "~") + text = text.replace("’", "'") + text = text.replace("…", "...") + text = text.replace("━", "-") + text = text.replace("〈", "<") + text = text.replace("〉", ">") + text = text.replace("【", "[") + text = text.replace("】", "]") + text = text.replace("%", "%") + return text + + +# Copied from transformers.models.xlm.tokenization_xlm.remove_non_printing_char +def remove_non_printing_char(text): + """ + Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl + """ + output = [] + for char in text: + cat = unicodedata.category(char) + if cat.startswith("C"): + continue + output.append(char) + return "".join(output) + + +# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize +def whitespace_tokenize(text): + """Runs basic whitespace cleaning and splitting on a piece of text.""" + text = text.strip() + if not text: + return [] + tokens = text.split() + return tokens + + +# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer +class BasicTokenizer(object): + """ + Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). + + Args: + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether or not to lowercase the input when tokenizing. + never_split (`Iterable`, *optional*): + Collection of tokens which will never be split during tokenization. Only has an effect when + `do_basic_tokenize=True` + tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): + Whether or not to tokenize Chinese characters. + + This should likely be deactivated for Japanese (see this + [issue](https://github.com/huggingface/transformers/issues/328)). + strip_accents (`bool`, *optional*): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for `lowercase` (as in the original BERT). + """ + + def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None): + if never_split is None: + never_split = [] + self.do_lower_case = do_lower_case + self.never_split = set(never_split) + self.tokenize_chinese_chars = tokenize_chinese_chars + self.strip_accents = strip_accents + + def tokenize(self, text, never_split=None): + """ + Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see + WordPieceTokenizer. + + Args: + never_split (`List[str]`, *optional*) + Kept for backward compatibility purposes. Now implemented directly at the base class level (see + [`PreTrainedTokenizer.tokenize`]) List of token not to split. + """ + # union() returns a new set by concatenating the two sets. + never_split = self.never_split.union(set(never_split)) if never_split else self.never_split + text = self._clean_text(text) + + # This was added on November 1st, 2018 for the multilingual and Chinese + # models. This is also applied to the English models now, but it doesn't + # matter since the English models were not trained on any Chinese data + # and generally don't have any Chinese data in them (there are Chinese + # characters in the vocabulary because Wikipedia does have some Chinese + # words in the English Wikipedia.). + if self.tokenize_chinese_chars: + text = self._tokenize_chinese_chars(text) + orig_tokens = whitespace_tokenize(text) + split_tokens = [] + for token in orig_tokens: + if token not in never_split: + if self.do_lower_case: + token = token.lower() + if self.strip_accents is not False: + token = self._run_strip_accents(token) + elif self.strip_accents: + token = self._run_strip_accents(token) + split_tokens.extend(self._run_split_on_punc(token, never_split)) + + output_tokens = whitespace_tokenize(" ".join(split_tokens)) + return output_tokens + + def _run_strip_accents(self, text): + """Strips accents from a piece of text.""" + text = unicodedata.normalize("NFD", text) + output = [] + for char in text: + cat = unicodedata.category(char) + if cat == "Mn": + continue + output.append(char) + return "".join(output) + + def _run_split_on_punc(self, text, never_split=None): + """Splits punctuation on a piece of text.""" + if never_split is not None and text in never_split: + return [text] + chars = list(text) + i = 0 + start_new_word = True + output = [] + while i < len(chars): + char = chars[i] + if _is_punctuation(char): + output.append([char]) + start_new_word = True + else: + if start_new_word: + output.append([]) + start_new_word = False + output[-1].append(char) + i += 1 + + return ["".join(x) for x in output] + + def _tokenize_chinese_chars(self, text): + """Adds whitespace around any CJK character.""" + output = [] + for char in text: + cp = ord(char) + if self._is_chinese_char(cp): + output.append(" ") + output.append(char) + output.append(" ") + else: + output.append(char) + return "".join(output) + + def _is_chinese_char(self, cp): + """Checks whether CP is the codepoint of a CJK character.""" + # This defines a "chinese character" as anything in the CJK Unicode block: + # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) + # + # Note that the CJK Unicode block is NOT all Japanese and Korean characters, + # despite its name. The modern Korean Hangul alphabet is a different block, + # as is Japanese Hiragana and Katakana. Those alphabets are used to write + # space-separated words, so they are not treated specially and handled + # like the all of the other languages. + if ( + (cp >= 0x4E00 and cp <= 0x9FFF) + or (cp >= 0x3400 and cp <= 0x4DBF) # + or (cp >= 0x20000 and cp <= 0x2A6DF) # + or (cp >= 0x2A700 and cp <= 0x2B73F) # + or (cp >= 0x2B740 and cp <= 0x2B81F) # + or (cp >= 0x2B820 and cp <= 0x2CEAF) # + or (cp >= 0xF900 and cp <= 0xFAFF) + or (cp >= 0x2F800 and cp <= 0x2FA1F) # + ): # + return True + + return False + + def _clean_text(self, text): + """Performs invalid character removal and whitespace cleanup on text.""" + output = [] + for char in text: + cp = ord(char) + if cp == 0 or cp == 0xFFFD or _is_control(char): + continue + if _is_whitespace(char): + output.append(" ") + else: + output.append(char) + return "".join(output) + + +class HerbertTokenizer(PreTrainedTokenizer): """ Construct a BPE tokenizer for HerBERT. @@ -68,22 +304,74 @@ def __init__( pad_token="", mask_token="", sep_token="", + bos_token="", do_lowercase_and_remove_accent=False, + additional_special_tokens=[ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + ], + lang2id=None, + id2lang=None, **kwargs ): super().__init__( - vocab_file, - merges_file, - tokenizer_file=None, - cls_token=cls_token, unk_token=unk_token, + bos_token=bos_token, + sep_token=sep_token, pad_token=pad_token, + cls_token=cls_token, mask_token=mask_token, - sep_token=sep_token, + additional_special_tokens=additional_special_tokens, + lang2id=lang2id, + id2lang=id2lang, do_lowercase_and_remove_accent=do_lowercase_and_remove_accent, + tokenizer_file=None, **kwargs, ) + + try: + import sacremoses + except ImportError: + raise ImportError( + "You need to install sacremoses to use HerbertTokenizer. " + "See https://pypi.org/project/sacremoses/ for installation." + ) + + self.sm = sacremoses + + # cache of sm.MosesPunctNormalizer instance + self.cache_moses_punct_normalizer = dict() + # cache of sm.MosesTokenizer instance + self.cache_moses_tokenizer = dict() + self.lang_with_custom_tokenizer = set(["zh", "th", "ja"]) + # True for current supported model (v1.2.0), False for XLM-17 & 100 + self.do_lowercase_and_remove_accent = do_lowercase_and_remove_accent + self.lang2id = lang2id + self.id2lang = id2lang + if lang2id is not None and id2lang is not None: + assert len(lang2id) == len(id2lang) + + self.ja_word_tokenizer = None + self.zh_word_tokenizer = None + + with open(vocab_file, encoding="utf-8") as vocab_handle: + self.encoder = json.load(vocab_handle) + self.decoder = {v: k for k, v in self.encoder.items()} + with open(merges_file, encoding="utf-8") as merges_handle: + merges = merges_handle.read().split("\n")[:-1] + merges = [tuple(merge.split()[:2]) for merge in merges] + self.bpe_ranks = dict(zip(merges, range(len(merges)))) + self.cache = {} + self.bert_pre_tokenizer = BasicTokenizer( do_lower_case=False, never_split=self.all_special_tokens, @@ -91,6 +379,112 @@ def __init__( strip_accents=False, ) + @property + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.do_lower_case + def do_lower_case(self): + return self.do_lowercase_and_remove_accent + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_punct_norm + def moses_punct_norm(self, text, lang): + if lang not in self.cache_moses_punct_normalizer: + punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang) + self.cache_moses_punct_normalizer[lang] = punct_normalizer + else: + punct_normalizer = self.cache_moses_punct_normalizer[lang] + return punct_normalizer.normalize(text) + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_tokenize + def moses_tokenize(self, text, lang): + if lang not in self.cache_moses_tokenizer: + moses_tokenizer = self.sm.MosesTokenizer(lang=lang) + self.cache_moses_tokenizer[lang] = moses_tokenizer + else: + moses_tokenizer = self.cache_moses_tokenizer[lang] + return moses_tokenizer.tokenize(text, return_str=False, escape=False) + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.moses_pipeline + def moses_pipeline(self, text, lang): + text = replace_unicode_punct(text) + text = self.moses_punct_norm(text, lang) + text = remove_non_printing_char(text) + return text + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.ja_tokenize + def ja_tokenize(self, text): + if self.ja_word_tokenizer is None: + try: + import Mykytea + + self.ja_word_tokenizer = Mykytea.Mykytea( + f"-model {os.path.expanduser('~')}/local/share/kytea/model.bin" + ) + except (AttributeError, ImportError): + logger.error( + "Make sure you install KyTea (https://github.com/neubig/kytea) and it's python wrapper" + " (https://github.com/chezou/Mykytea-python) with the following steps" + ) + logger.error("1. git clone git@github.com:neubig/kytea.git && cd kytea") + logger.error("2. autoreconf -i") + logger.error("3. ./configure --prefix=$HOME/local") + logger.error("4. make && make install") + logger.error("5. pip install kytea") + raise + return list(self.ja_word_tokenizer.getWS(text)) + + @property + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.vocab_size + def vocab_size(self): + return len(self.encoder) + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.get_vocab + def get_vocab(self): + return dict(self.encoder, **self.added_tokens_encoder) + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.bpe + def bpe(self, token): + word = tuple(token[:-1]) + (token[-1] + "",) + if token in self.cache: + return self.cache[token] + pairs = get_pairs(word) + + if not pairs: + return token + "" + + while True: + bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + except ValueError: + new_word.extend(word[i:]) + break + else: + new_word.extend(word[i:j]) + i = j + + if word[i] == first and i < len(word) - 1 and word[i + 1] == second: + new_word.append(first + second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = " ".join(word) + if word == "\n ": + word = "\n" + self.cache[token] = word + return word + def _tokenize(self, text): pre_tokens = self.bert_pre_tokenizer.tokenize(text) @@ -101,3 +495,155 @@ def _tokenize(self, text): split_tokens.extend([t for t in self.bpe(token).split(" ")]) return split_tokens + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer._convert_token_to_id + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.encoder.get(token, self.encoder.get(self.unk_token)) + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer._convert_id_to_token + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.decoder.get(index, self.unk_token) + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.convert_tokens_to_string + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + out_string = "".join(tokens).replace("", " ").strip() + return out_string + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.build_inputs_with_special_tokens + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. An XLM sequence has the following format: + + - single sequence: ` X ` + - pair of sequences: ` A B ` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. + + """ + bos = [self.bos_token_id] + sep = [self.sep_token_id] + + if token_ids_1 is None: + return bos + token_ids_0 + sep + return bos + token_ids_0 + sep + token_ids_1 + sep + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.get_special_tokens_mask + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + if token_ids_1 is not None: + return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] + return [1] + ([0] * len(token_ids_0)) + [1] + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.create_token_type_ids_from_sequences + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. An XLM sequence + pair mask has the following format: + + ``` + 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence | + ``` + + If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.save_vocabulary + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + if not os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + merge_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] + ) + + with open(vocab_file, "w", encoding="utf-8") as f: + f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") + + index = 0 + with open(merge_file, "w", encoding="utf-8") as writer: + for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): + if index != token_index: + logger.warning( + f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." + " Please check that the tokenizer is not corrupted!" + ) + index = token_index + writer.write(" ".join(bpe_tokens) + "\n") + index += 1 + + return vocab_file, merge_file + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.__getstate__ + def __getstate__(self): + state = self.__dict__.copy() + state["sm"] = None + return state + + # Copied from transformers.models.xlm.tokenization_xlm.XLMTokenizer.__setstate__ + def __setstate__(self, d): + self.__dict__ = d + + try: + import sacremoses + except ImportError: + raise ImportError( + "You need to install sacremoses to use XLMTokenizer. " + "See https://pypi.org/project/sacremoses/ for installation." + ) + + self.sm = sacremoses From 06514b3e1a00f031ca5bf999a87081fd1fe8b700 Mon Sep 17 00:00:00 2001 From: Patrick von Platen Date: Fri, 7 Oct 2022 18:19:15 +0200 Subject: [PATCH 497/539] Clip device map (#19409) * add first generation tutorial * uP * [Clip] Add text model to device map --- src/transformers/models/clip/modeling_clip.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/transformers/models/clip/modeling_clip.py b/src/transformers/models/clip/modeling_clip.py index 799d0ef0462afc..d3af1c055b4e94 100755 --- a/src/transformers/models/clip/modeling_clip.py +++ b/src/transformers/models/clip/modeling_clip.py @@ -688,6 +688,8 @@ def _build_causal_attention_mask(self, bsz, seq_len, dtype): class CLIPTextModel(CLIPPreTrainedModel): config_class = CLIPTextConfig + _no_split_modules = ["CLIPEncoderLayer"] + def __init__(self, config: CLIPTextConfig): super().__init__(config) self.text_model = CLIPTextTransformer(config) From 6ef16f2b67bdf2797297d65f72efc68256d11e3f Mon Sep 17 00:00:00 2001 From: Infrared1029 <60873139+Infrared1029@users.noreply.github.com> Date: Fri, 7 Oct 2022 18:19:50 +0200 Subject: [PATCH 498/539] Remove Dependency between Bart and LED (slow/fast) (#19408) * removed dependency from bart(slow) * removed dependency from bart(slow) * adding copying comments (copied from bart to led) * updated led docstring * updated led docstring * removed dependency from Bart (fast) * replaced bart with LED in docstrings * complying flake8 * added more copy comments * fixing copying comments * added comments back * fix copy comments * fixing copied from comments * fixing copied from comments --- .../models/led/tokenization_led.py | 382 +++++++++++++++++- .../models/led/tokenization_led_fast.py | 252 +++++++++++- 2 files changed, 619 insertions(+), 15 deletions(-) diff --git a/src/transformers/models/led/tokenization_led.py b/src/transformers/models/led/tokenization_led.py index 84232ef517ec39..812e374c7a485d 100644 --- a/src/transformers/models/led/tokenization_led.py +++ b/src/transformers/models/led/tokenization_led.py @@ -14,15 +14,24 @@ # limitations under the License. """Tokenization classes for LED.""" -from typing import Dict, Optional, Union +import json +import os +from functools import lru_cache +from typing import Dict, List, Optional, Tuple, Union +import regex as re + +from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging -from ..bart.tokenization_bart import BartTokenizer logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} + +# See all LED models at https://huggingface.co/models?filter=LED PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", @@ -40,18 +49,377 @@ } -class LEDTokenizer(BartTokenizer): +@lru_cache() +# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control + characters the bpe code barfs on. + + The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab + if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for + decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup + tables between utf-8 bytes and unicode strings. """ - Construct a LED tokenizer. + bs = ( + list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) + ) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8 + n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) - [`LEDTokenizer`] is identical to [`BartTokenizer`] and runs end-to-end tokenization: punctuation splitting and - wordpiece. - Refer to superclass [`BartTokenizer`] for usage examples and documentation concerning parameters. +# Copied from transformers.models.bart.tokenization_bart.get_pairs +def get_pairs(word): """ + Return set of symbol pairs in a word. + + Word is represented as tuple of symbols (symbols being variable-length strings). + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +class LEDTokenizer(PreTrainedTokenizer): + """ + Constructs a LED tokenizer, which is smilar to the ROBERTa tokenizer, using byte-level Byte-Pair-Encoding. + + This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will + be encoded differently whether it is at the beginning of the sentence (without space) or not: + + ``` + >>> from transformers import LEDTokenizer + >>> tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384") + >>> tokenizer("Hello world")['input_ids'] + [0, 31414, 232, 2] + >>> tokenizer(" Hello world")['input_ids'] + [0, 20920, 232, 2] + ``` + + You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you + call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. + + + + When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one). + + + + This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to + this superclass for more information regarding those methods. + Args: + vocab_file (`str`): + Path to the vocabulary file. + merges_file (`str`): + Path to the merges file. + errors (`str`, *optional*, defaults to `"replace"`): + Paradigm to follow when decoding bytes to UTF-8. See + [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. + bos_token (`str`, *optional*, defaults to `""`): + The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. + + + + When building a sequence using special tokens, this is not the token that is used for the beginning of + sequence. The token used is the `cls_token`. + + + + eos_token (`str`, *optional*, defaults to `""`): + The end of sequence token. + + + + When building a sequence using special tokens, this is not the token that is used for the end of sequence. + The token used is the `sep_token`. + + + + sep_token (`str`, *optional*, defaults to `""`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + cls_token (`str`, *optional*, defaults to `""`): + The classifier token which is used when doing sequence classification (classification of the whole sequence + instead of per-token classification). It is the first token of the sequence when built with special tokens. + unk_token (`str`, *optional*, defaults to `""`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + pad_token (`str`, *optional*, defaults to `""`): + The token used for padding, for example when batching sequences of different lengths. + mask_token (`str`, *optional*, defaults to `""`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. + add_prefix_space (`bool`, *optional*, defaults to `False`): + Whether or not to add an initial space to the input. This allows to treat the leading word just as any + other word. (BART tokenizer detect beginning of words by the preceding space). + """ + + vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + model_input_names = ["input_ids", "attention_mask"] + + # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.__init__ + def __init__( + self, + vocab_file, + merges_file, + errors="replace", + bos_token="", + eos_token="", + sep_token="", + cls_token="", + unk_token="", + pad_token="", + mask_token="", + add_prefix_space=False, + **kwargs + ): + bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token + eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token + sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token + cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token + unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token + pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token + + # Mask token behave like a normal word, i.e. include the space before it + mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token + + super().__init__( + errors=errors, + bos_token=bos_token, + eos_token=eos_token, + unk_token=unk_token, + sep_token=sep_token, + cls_token=cls_token, + pad_token=pad_token, + mask_token=mask_token, + add_prefix_space=add_prefix_space, + **kwargs, + ) + + with open(vocab_file, encoding="utf-8") as vocab_handle: + self.encoder = json.load(vocab_handle) + self.decoder = {v: k for k, v in self.encoder.items()} + self.errors = errors # how to handle errors in decoding + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + with open(merges_file, encoding="utf-8") as merges_handle: + bpe_merges = merges_handle.read().split("\n")[1:-1] + bpe_merges = [tuple(merge.split()) for merge in bpe_merges] + self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) + self.cache = {} + self.add_prefix_space = add_prefix_space + + # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions + self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") + + @property + # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size + def vocab_size(self): + return len(self.encoder) + + # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.get_vocab + def get_vocab(self): + return dict(self.encoder, **self.added_tokens_encoder) + + # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.bpe + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token) + pairs = get_pairs(word) + + if not pairs: + return token + + while True: + bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + except ValueError: + new_word.extend(word[i:]) + break + else: + new_word.extend(word[i:j]) + i = j + + if word[i] == first and i < len(word) - 1 and word[i + 1] == second: + new_word.append(first + second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = " ".join(word) + self.cache[token] = word + return word + + # Copied from transformers.models.bart.tokenization_bart.BartTokenizer._tokenize + def _tokenize(self, text): + """Tokenize a string.""" + bpe_tokens = [] + for token in re.findall(self.pat, text): + token = "".join( + self.byte_encoder[b] for b in token.encode("utf-8") + ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) + bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" ")) + return bpe_tokens + + # Copied from transformers.models.bart.tokenization_bart.BartTokenizer._convert_token_to_id + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.encoder.get(token, self.encoder.get(self.unk_token)) + + # Copied from transformers.models.bart.tokenization_bart.BartTokenizer._convert_id_to_token + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.decoder.get(index) + + # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.convert_tokens_to_string + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + text = "".join(tokens) + text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors) + return text + + # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.save_vocabulary + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + if not os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + merge_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] + ) + + with open(vocab_file, "w", encoding="utf-8") as f: + f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") + + index = 0 + with open(merge_file, "w", encoding="utf-8") as writer: + writer.write("#version: 0.2\n") + for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): + if index != token_index: + logger.warning( + f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." + " Please check that the tokenizer is not corrupted!" + ) + index = token_index + writer.write(" ".join(bpe_tokens) + "\n") + index += 1 + + return vocab_file, merge_file + + # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.build_inputs_with_special_tokens with BART->LED + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A LED sequence has the following format: + + - single sequence: ` X ` + - pair of sequences: ` A B ` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. + """ + if token_ids_1 is None: + return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + cls = [self.cls_token_id] + sep = [self.sep_token_id] + return cls + token_ids_0 + sep + sep + token_ids_1 + sep + + # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.get_special_tokens_mask + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + if token_ids_1 is None: + return [1] + ([0] * len(token_ids_0)) + [1] + return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] + + # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.create_token_type_ids_from_sequences with BART->LED + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. LED does not + make use of token type ids, therefore a list of zeros is returned. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of zeros. + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] + + # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.prepare_for_tokenization + def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): + add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space) + if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()): + text = " " + text + return (text, kwargs) def _pad( self, diff --git a/src/transformers/models/led/tokenization_led_fast.py b/src/transformers/models/led/tokenization_led_fast.py index 5bcd5d7895da9b..ee9118f11d680f 100644 --- a/src/transformers/models/led/tokenization_led_fast.py +++ b/src/transformers/models/led/tokenization_led_fast.py @@ -14,16 +14,22 @@ # limitations under the License. """Tokenization classes for LED.""" -from typing import Dict, Optional, Union +import json +from typing import Dict, List, Optional, Tuple, Union -from ...tokenization_utils_base import BatchEncoding, EncodedInput +from tokenizers import pre_tokenizers, processors + +from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput +from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging -from ..bart.tokenization_bart_fast import BartTokenizerFast from .tokenization_led import LEDTokenizer logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} + PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", @@ -41,19 +47,249 @@ } -class LEDTokenizerFast(BartTokenizerFast): +class LEDTokenizerFast(PreTrainedTokenizerFast): r""" - Construct a "fast" LED tokenizer (backed by HuggingFace's *tokenizers* library). + Construct a "fast" LED tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2 tokenizer, + using byte-level Byte-Pair-Encoding. + + This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will + be encoded differently whether it is at the beginning of the sentence (without space) or not: + + ``` + >>> from transformers import LEDTokenizerFast + >>> tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384") + >>> tokenizer("Hello world")['input_ids'] + [0, 31414, 232, 2] + >>> tokenizer(" Hello world")['input_ids'] + [0, 20920, 232, 2] + ``` + + You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you + call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. + + + + When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`. + + + + This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should + refer to this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + Path to the vocabulary file. + merges_file (`str`): + Path to the merges file. + errors (`str`, *optional*, defaults to `"replace"`): + Paradigm to follow when decoding bytes to UTF-8. See + [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. + bos_token (`str`, *optional*, defaults to `""`): + The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. + + - [`LEDTokenizerFast`] is identical to [`BartTokenizerFast`] and runs end-to-end tokenization: punctuation splitting - and wordpiece. + When building a sequence using special tokens, this is not the token that is used for the beginning of + sequence. The token used is the `cls_token`. - Refer to superclass [`BartTokenizerFast`] for usage examples and documentation concerning parameters. + + + eos_token (`str`, *optional*, defaults to `""`): + The end of sequence token. + + + + When building a sequence using special tokens, this is not the token that is used for the end of sequence. + The token used is the `sep_token`. + + + + sep_token (`str`, *optional*, defaults to `""`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + cls_token (`str`, *optional*, defaults to `""`): + The classifier token which is used when doing sequence classification (classification of the whole sequence + instead of per-token classification). It is the first token of the sequence when built with special tokens. + unk_token (`str`, *optional*, defaults to `""`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + pad_token (`str`, *optional*, defaults to `""`): + The token used for padding, for example when batching sequences of different lengths. + mask_token (`str`, *optional*, defaults to `""`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. + add_prefix_space (`bool`, *optional*, defaults to `False`): + Whether or not to add an initial space to the input. This allows to treat the leading word just as any + other word. (LED tokenizer detect beginning of words by the preceding space). + trim_offsets (`bool`, *optional*, defaults to `True`): + Whether the post processing step should trim offsets to avoid including whitespaces. """ + vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES slow_tokenizer_class = LEDTokenizer + model_input_names = ["input_ids", "attention_mask"] + + # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.__init__ + def __init__( + self, + vocab_file=None, + merges_file=None, + tokenizer_file=None, + errors="replace", + bos_token="", + eos_token="", + sep_token="", + cls_token="", + unk_token="", + pad_token="", + mask_token="", + add_prefix_space=False, + trim_offsets=True, + **kwargs + ): + super().__init__( + vocab_file, + merges_file, + tokenizer_file=tokenizer_file, + errors=errors, + bos_token=bos_token, + eos_token=eos_token, + sep_token=sep_token, + cls_token=cls_token, + unk_token=unk_token, + pad_token=pad_token, + mask_token=mask_token, + add_prefix_space=add_prefix_space, + trim_offsets=trim_offsets, + **kwargs, + ) + + pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) + if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space: + pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type")) + pre_tok_state["add_prefix_space"] = add_prefix_space + self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state) + + self.add_prefix_space = add_prefix_space + + # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` + tokenizer_component = "post_processor" + tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None) + if tokenizer_component_instance: + state = json.loads(tokenizer_component_instance.__getstate__()) + + # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` + if "sep" in state: + state["sep"] = tuple(state["sep"]) + if "cls" in state: + state["cls"] = tuple(state["cls"]) + + changes_to_apply = False + + if state.get("add_prefix_space", add_prefix_space) != add_prefix_space: + state["add_prefix_space"] = add_prefix_space + changes_to_apply = True + + if state.get("trim_offsets", trim_offsets) != trim_offsets: + state["trim_offsets"] = trim_offsets + changes_to_apply = True + + if changes_to_apply: + component_class = getattr(processors, state.pop("type")) + new_value = component_class(**state) + setattr(self.backend_tokenizer, tokenizer_component, new_value) + + @property + # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED + def mask_token(self) -> str: + """ + `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not + having been set. + + LED tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily + comprise the space before the **. + """ + if self._mask_token is None: + if self.verbose: + logger.error("Using mask_token, but it is not set yet.") + return None + return str(self._mask_token) + + @mask_token.setter + def mask_token(self, value): + """ + Overriding the default behavior of the mask token to have it eat the space before it. + + This is needed to preserve backward compatibility with all the previously used models based on LED. + """ + # Mask token behave like a normal word, i.e. include the space before it + # So we set lstrip to True + value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value + self._mask_token = value + + # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast._batch_encode_plus + def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: + is_split_into_words = kwargs.get("is_split_into_words", False) + + if is_split_into_words and not self.add_prefix_space: + raise ValueError( + f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " + "to use it with pretokenized inputs." + ) + + return super()._batch_encode_plus(*args, **kwargs) + + # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast._encode_plus + def _encode_plus(self, *args, **kwargs) -> BatchEncoding: + is_split_into_words = kwargs.get("is_split_into_words", False) + + if is_split_into_words and not self.add_prefix_space: + raise ValueError( + f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " + "to use it with pretokenized inputs." + ) + + return super()._encode_plus(*args, **kwargs) + + # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.save_vocabulary + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + files = self._tokenizer.model.save(save_directory, name=filename_prefix) + return tuple(files) + + # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.build_inputs_with_special_tokens + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): + output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id] + if token_ids_1 is None: + return output + + return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id] + + # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.create_token_type_ids_from_sequences with BART->LED + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. LED does not + make use of token type ids, therefore a list of zeros is returned. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of zeros. + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] # Copied from transformers.models.led.tokenization_led.LEDTokenizer._pad def _pad( From 7418a48e34875092f3213753b890cfb2f5dcb32a Mon Sep 17 00:00:00 2001 From: Blip blop Date: Fri, 7 Oct 2022 21:54:04 +0530 Subject: [PATCH 499/539] Removed `Bert` interdependency in `tokenization_electra.py` (#19356) * Copied from BertTokenizer() in tokenization_bert * Added BasicTokenizer and WordPieceTokenizer Class * Update src/transformers/models/electra/tokenization_electra.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Added copied from comments for basicTokenizer and WordPieceTokenizer * Updated the comments for the tokenizerClasses * Update src/transformers/models/electra/tokenization_electra.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/electra/tokenization_electra.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Formatted tokenization_electra with `make style` * Fix repo inconsistencies * Update src/transformers/models/electra/tokenization_electra.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Set the logger Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> --- .../models/electra/tokenization_electra.py | 471 +++++++++++++++++- 1 file changed, 464 insertions(+), 7 deletions(-) diff --git a/src/transformers/models/electra/tokenization_electra.py b/src/transformers/models/electra/tokenization_electra.py index 2feeaaa2a7485a..e34484a0761715 100644 --- a/src/transformers/models/electra/tokenization_electra.py +++ b/src/transformers/models/electra/tokenization_electra.py @@ -13,8 +13,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ..bert.tokenization_bert import BertTokenizer +import collections +import os +import unicodedata +from typing import List, Optional, Tuple +from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace +from ...utils import logging + + +logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} @@ -59,17 +67,466 @@ } -class ElectraTokenizer(BertTokenizer): +# Copied from transformers.models.bert.tokenization_bert.load_vocab +def load_vocab(vocab_file): + """Loads a vocabulary file into a dictionary.""" + vocab = collections.OrderedDict() + with open(vocab_file, "r", encoding="utf-8") as reader: + tokens = reader.readlines() + for index, token in enumerate(tokens): + token = token.rstrip("\n") + vocab[token] = index + return vocab + + +# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize +def whitespace_tokenize(text): + """Runs basic whitespace cleaning and splitting on a piece of text.""" + text = text.strip() + if not text: + return [] + tokens = text.split() + return tokens + + +# Copied from transformers.models.bert.tokenization_bert.BertTokenizer with Bert->Electra +class ElectraTokenizer(PreTrainedTokenizer): r""" - Construct an ELECTRA tokenizer. + Construct a BERT tokenizer. Based on WordPiece. - [`ElectraTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation splitting and - wordpiece. + This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to + this superclass for more information regarding those methods. - Refer to superclass [`BertTokenizer`] for usage examples and documentation concerning parameters. + Args: + vocab_file (`str`): + File containing the vocabulary. + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether or not to lowercase the input when tokenizing. + do_basic_tokenize (`bool`, *optional*, defaults to `True`): + Whether or not to do basic tokenization before WordPiece. + never_split (`Iterable`, *optional*): + Collection of tokens which will never be split during tokenization. Only has an effect when + `do_basic_tokenize=True` + unk_token (`str`, *optional*, defaults to `"[UNK]"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + sep_token (`str`, *optional*, defaults to `"[SEP]"`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + pad_token (`str`, *optional*, defaults to `"[PAD]"`): + The token used for padding, for example when batching sequences of different lengths. + cls_token (`str`, *optional*, defaults to `"[CLS]"`): + The classifier token which is used when doing sequence classification (classification of the whole sequence + instead of per-token classification). It is the first token of the sequence when built with special tokens. + mask_token (`str`, *optional*, defaults to `"[MASK]"`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. + tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): + Whether or not to tokenize Chinese characters. + + This should likely be deactivated for Japanese (see this + [issue](https://github.com/huggingface/transformers/issues/328)). + strip_accents (`bool`, *optional*): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for `lowercase` (as in the original BERT). """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP - max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + + def __init__( + self, + vocab_file, + do_lower_case=True, + do_basic_tokenize=True, + never_split=None, + unk_token="[UNK]", + sep_token="[SEP]", + pad_token="[PAD]", + cls_token="[CLS]", + mask_token="[MASK]", + tokenize_chinese_chars=True, + strip_accents=None, + **kwargs + ): + super().__init__( + do_lower_case=do_lower_case, + do_basic_tokenize=do_basic_tokenize, + never_split=never_split, + unk_token=unk_token, + sep_token=sep_token, + pad_token=pad_token, + cls_token=cls_token, + mask_token=mask_token, + tokenize_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + **kwargs, + ) + + if not os.path.isfile(vocab_file): + raise ValueError( + f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained" + " model use `tokenizer = ElectraTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" + ) + self.vocab = load_vocab(vocab_file) + self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) + self.do_basic_tokenize = do_basic_tokenize + if do_basic_tokenize: + self.basic_tokenizer = BasicTokenizer( + do_lower_case=do_lower_case, + never_split=never_split, + tokenize_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + ) + self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token) + + @property + def do_lower_case(self): + return self.basic_tokenizer.do_lower_case + + @property + def vocab_size(self): + return len(self.vocab) + + def get_vocab(self): + return dict(self.vocab, **self.added_tokens_encoder) + + def _tokenize(self, text): + split_tokens = [] + if self.do_basic_tokenize: + for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens): + + # If the token is part of the never_split set + if token in self.basic_tokenizer.never_split: + split_tokens.append(token) + else: + split_tokens += self.wordpiece_tokenizer.tokenize(token) + else: + split_tokens = self.wordpiece_tokenizer.tokenize(text) + return split_tokens + + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.vocab.get(token, self.vocab.get(self.unk_token)) + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.ids_to_tokens.get(index, self.unk_token) + + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + out_string = " ".join(tokens).replace(" ##", "").strip() + return out_string + + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A BERT sequence has the following format: + + - single sequence: `[CLS] X [SEP]` + - pair of sequences: `[CLS] A [SEP] B [SEP]` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. + """ + if token_ids_1 is None: + return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + cls = [self.cls_token_id] + sep = [self.sep_token_id] + return cls + token_ids_0 + sep + token_ids_1 + sep + + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + if token_ids_1 is not None: + return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] + return [1] + ([0] * len(token_ids_0)) + [1] + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence + pair mask has the following format: + + ``` + 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence | + ``` + + If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + index = 0 + if os.path.isdir(save_directory): + vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + else: + vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory + with open(vocab_file, "w", encoding="utf-8") as writer: + for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): + if index != token_index: + logger.warning( + f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." + " Please check that the vocabulary is not corrupted!" + ) + index = token_index + writer.write(token + "\n") + index += 1 + return (vocab_file,) + + +# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer +class BasicTokenizer(object): + """ + Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). + + Args: + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether or not to lowercase the input when tokenizing. + never_split (`Iterable`, *optional*): + Collection of tokens which will never be split during tokenization. Only has an effect when + `do_basic_tokenize=True` + tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): + Whether or not to tokenize Chinese characters. + + This should likely be deactivated for Japanese (see this + [issue](https://github.com/huggingface/transformers/issues/328)). + strip_accents (`bool`, *optional*): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for `lowercase` (as in the original BERT). + """ + + def __init__(self, do_lower_case=True, never_split=None, tokenize_chinese_chars=True, strip_accents=None): + if never_split is None: + never_split = [] + self.do_lower_case = do_lower_case + self.never_split = set(never_split) + self.tokenize_chinese_chars = tokenize_chinese_chars + self.strip_accents = strip_accents + + def tokenize(self, text, never_split=None): + """ + Basic Tokenization of a piece of text. Split on "white spaces" only, for sub-word tokenization, see + WordPieceTokenizer. + + Args: + never_split (`List[str]`, *optional*) + Kept for backward compatibility purposes. Now implemented directly at the base class level (see + [`PreTrainedTokenizer.tokenize`]) List of token not to split. + """ + # union() returns a new set by concatenating the two sets. + never_split = self.never_split.union(set(never_split)) if never_split else self.never_split + text = self._clean_text(text) + + # This was added on November 1st, 2018 for the multilingual and Chinese + # models. This is also applied to the English models now, but it doesn't + # matter since the English models were not trained on any Chinese data + # and generally don't have any Chinese data in them (there are Chinese + # characters in the vocabulary because Wikipedia does have some Chinese + # words in the English Wikipedia.). + if self.tokenize_chinese_chars: + text = self._tokenize_chinese_chars(text) + orig_tokens = whitespace_tokenize(text) + split_tokens = [] + for token in orig_tokens: + if token not in never_split: + if self.do_lower_case: + token = token.lower() + if self.strip_accents is not False: + token = self._run_strip_accents(token) + elif self.strip_accents: + token = self._run_strip_accents(token) + split_tokens.extend(self._run_split_on_punc(token, never_split)) + + output_tokens = whitespace_tokenize(" ".join(split_tokens)) + return output_tokens + + def _run_strip_accents(self, text): + """Strips accents from a piece of text.""" + text = unicodedata.normalize("NFD", text) + output = [] + for char in text: + cat = unicodedata.category(char) + if cat == "Mn": + continue + output.append(char) + return "".join(output) + + def _run_split_on_punc(self, text, never_split=None): + """Splits punctuation on a piece of text.""" + if never_split is not None and text in never_split: + return [text] + chars = list(text) + i = 0 + start_new_word = True + output = [] + while i < len(chars): + char = chars[i] + if _is_punctuation(char): + output.append([char]) + start_new_word = True + else: + if start_new_word: + output.append([]) + start_new_word = False + output[-1].append(char) + i += 1 + + return ["".join(x) for x in output] + + def _tokenize_chinese_chars(self, text): + """Adds whitespace around any CJK character.""" + output = [] + for char in text: + cp = ord(char) + if self._is_chinese_char(cp): + output.append(" ") + output.append(char) + output.append(" ") + else: + output.append(char) + return "".join(output) + + def _is_chinese_char(self, cp): + """Checks whether CP is the codepoint of a CJK character.""" + # This defines a "chinese character" as anything in the CJK Unicode block: + # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) + # + # Note that the CJK Unicode block is NOT all Japanese and Korean characters, + # despite its name. The modern Korean Hangul alphabet is a different block, + # as is Japanese Hiragana and Katakana. Those alphabets are used to write + # space-separated words, so they are not treated specially and handled + # like the all of the other languages. + if ( + (cp >= 0x4E00 and cp <= 0x9FFF) + or (cp >= 0x3400 and cp <= 0x4DBF) # + or (cp >= 0x20000 and cp <= 0x2A6DF) # + or (cp >= 0x2A700 and cp <= 0x2B73F) # + or (cp >= 0x2B740 and cp <= 0x2B81F) # + or (cp >= 0x2B820 and cp <= 0x2CEAF) # + or (cp >= 0xF900 and cp <= 0xFAFF) + or (cp >= 0x2F800 and cp <= 0x2FA1F) # + ): # + return True + + return False + + def _clean_text(self, text): + """Performs invalid character removal and whitespace cleanup on text.""" + output = [] + for char in text: + cp = ord(char) + if cp == 0 or cp == 0xFFFD or _is_control(char): + continue + if _is_whitespace(char): + output.append(" ") + else: + output.append(char) + return "".join(output) + + +# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer +class WordpieceTokenizer(object): + """Runs WordPiece tokenization.""" + + def __init__(self, vocab, unk_token, max_input_chars_per_word=100): + self.vocab = vocab + self.unk_token = unk_token + self.max_input_chars_per_word = max_input_chars_per_word + + def tokenize(self, text): + """ + Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform + tokenization using the given vocabulary. + + For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`. + + Args: + text: A single token or whitespace separated tokens. This should have + already been passed through *BasicTokenizer*. + + Returns: + A list of wordpiece tokens. + """ + + output_tokens = [] + for token in whitespace_tokenize(text): + chars = list(token) + if len(chars) > self.max_input_chars_per_word: + output_tokens.append(self.unk_token) + continue + + is_bad = False + start = 0 + sub_tokens = [] + while start < len(chars): + end = len(chars) + cur_substr = None + while start < end: + substr = "".join(chars[start:end]) + if start > 0: + substr = "##" + substr + if substr in self.vocab: + cur_substr = substr + break + end -= 1 + if cur_substr is None: + is_bad = True + break + sub_tokens.append(cur_substr) + start = end + + if is_bad: + output_tokens.append(self.unk_token) + else: + output_tokens.extend(sub_tokens) + return output_tokens From 34e0cc6d86b42319aee53fd1f474e2f5862e7b97 Mon Sep 17 00:00:00 2001 From: mustapha ajeghrir <66799406+Mustapha-AJEGHRIR@users.noreply.github.com> Date: Fri, 7 Oct 2022 19:42:24 +0200 Subject: [PATCH 500/539] Make `Camembert` TF version independent from `Roberta` (#19364) * camembert tf version independent * fixup * fixup, all working * remove comments * Adding copied from roberta Co-authored-by: Mustapha AJEGHRIR --- src/transformers/__init__.py | 2 + src/transformers/models/camembert/__init__.py | 2 + .../models/camembert/modeling_tf_camembert.py | 1658 ++++++++++++++++- src/transformers/utils/dummy_tf_objects.py | 7 + 4 files changed, 1618 insertions(+), 51 deletions(-) diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 026ec59eb1763c..370a347c9fcc10 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -2314,6 +2314,7 @@ "TFCamembertForSequenceClassification", "TFCamembertForTokenClassification", "TFCamembertModel", + "TFCamembertPreTrainedModel", ] ) _import_structure["models.clip"].extend( @@ -4981,6 +4982,7 @@ TFCamembertForSequenceClassification, TFCamembertForTokenClassification, TFCamembertModel, + TFCamembertPreTrainedModel, ) from .models.clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, diff --git a/src/transformers/models/camembert/__init__.py b/src/transformers/models/camembert/__init__.py index 40aa94e2e45a9b..133cf41ff6edf1 100644 --- a/src/transformers/models/camembert/__init__.py +++ b/src/transformers/models/camembert/__init__.py @@ -81,6 +81,7 @@ "TFCamembertForSequenceClassification", "TFCamembertForTokenClassification", "TFCamembertModel", + "TFCamembertPreTrainedModel", ] @@ -136,6 +137,7 @@ TFCamembertForSequenceClassification, TFCamembertForTokenClassification, TFCamembertModel, + TFCamembertPreTrainedModel, ) else: diff --git a/src/transformers/models/camembert/modeling_tf_camembert.py b/src/transformers/models/camembert/modeling_tf_camembert.py index 708282188bbbaa..9e891e97fda99e 100644 --- a/src/transformers/models/camembert/modeling_tf_camembert.py +++ b/src/transformers/models/camembert/modeling_tf_camembert.py @@ -15,21 +15,55 @@ # limitations under the License. """ TF 2.0 CamemBERT model.""" -from ...utils import add_start_docstrings, logging -from ..roberta.modeling_tf_roberta import ( - TFRobertaForCausalLM, - TFRobertaForMaskedLM, - TFRobertaForMultipleChoice, - TFRobertaForQuestionAnswering, - TFRobertaForSequenceClassification, - TFRobertaForTokenClassification, - TFRobertaModel, +import math +import warnings +from typing import Optional, Tuple, Union + +import numpy as np +import tensorflow as tf + +from ...activations_tf import get_tf_activation +from ...modeling_tf_outputs import ( + TFBaseModelOutputWithPastAndCrossAttentions, + TFBaseModelOutputWithPoolingAndCrossAttentions, + TFCausalLMOutputWithCrossAttentions, + TFMaskedLMOutput, + TFMultipleChoiceModelOutput, + TFQuestionAnsweringModelOutput, + TFSequenceClassifierOutput, + TFTokenClassifierOutput, +) +from ...modeling_tf_utils import ( + TFCausalLanguageModelingLoss, + TFMaskedLanguageModelingLoss, + TFModelInputType, + TFMultipleChoiceLoss, + TFPreTrainedModel, + TFQuestionAnsweringLoss, + TFSequenceClassificationLoss, + TFTokenClassificationLoss, + get_initializer, + keras_serializable, + unpack_inputs, +) +from ...tf_utils import shape_list, stable_softmax +from ...utils import ( + DUMMY_INPUTS, + MULTIPLE_CHOICE_DUMMY_INPUTS, + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, ) from .configuration_camembert import CamembertConfig logger = logging.get_logger(__name__) +_CHECKPOINT_FOR_DOC = "camembert-base" +_CONFIG_FOR_DOC = "CamembertConfig" +_TOKENIZER_FOR_DOC = "CamembertTokenizer" + TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ # See all CamemBERT models at https://huggingface.co/models?filter=camembert ] @@ -77,31 +111,1076 @@ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ +CAMEMBERT_INPUTS_DOCSTRING = r""" + Args: + input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`CamembertTokenizer`]. See [`PreTrainedTokenizer.__call__`] and + [`PreTrainedTokenizer.encode`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the + config will be used instead. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. This argument can be used only in eager mode, in graph mode the value in the config will be + used instead. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in + eager mode, in graph mode the value will always be set to True. + training (`bool`, *optional*, defaults to `False`): + Whether or not to use the model in training mode (some modules like dropout modules have different + behaviors between training and evaluation). +""" + + +# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaEmbeddings +class TFCamembertEmbeddings(tf.keras.layers.Layer): + """ + Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. + """ + + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + + self.padding_idx = 1 + self.vocab_size = config.vocab_size + self.type_vocab_size = config.type_vocab_size + self.hidden_size = config.hidden_size + self.max_position_embeddings = config.max_position_embeddings + self.initializer_range = config.initializer_range + self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") + self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) + + def build(self, input_shape: tf.TensorShape): + with tf.name_scope("word_embeddings"): + self.weight = self.add_weight( + name="weight", + shape=[self.vocab_size, self.hidden_size], + initializer=get_initializer(self.initializer_range), + ) + + with tf.name_scope("token_type_embeddings"): + self.token_type_embeddings = self.add_weight( + name="embeddings", + shape=[self.type_vocab_size, self.hidden_size], + initializer=get_initializer(self.initializer_range), + ) + + with tf.name_scope("position_embeddings"): + self.position_embeddings = self.add_weight( + name="embeddings", + shape=[self.max_position_embeddings, self.hidden_size], + initializer=get_initializer(self.initializer_range), + ) + + super().build(input_shape) + + def create_position_ids_from_input_ids(self, input_ids, past_key_values_length=0): + """ + Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding + symbols are ignored. This is modified from fairseq's `utils.make_positions`. + + Args: + input_ids: tf.Tensor + Returns: tf.Tensor + """ + mask = tf.cast(tf.math.not_equal(input_ids, self.padding_idx), dtype=input_ids.dtype) + incremental_indices = (tf.math.cumsum(mask, axis=1) + past_key_values_length) * mask + + return incremental_indices + self.padding_idx + + def call( + self, + input_ids=None, + position_ids=None, + token_type_ids=None, + inputs_embeds=None, + past_key_values_length=0, + training=False, + ): + """ + Applies embedding based on inputs tensor. + + Returns: + final_embeddings (`tf.Tensor`): output embedding tensor. + """ + assert not (input_ids is None and inputs_embeds is None) + + if input_ids is not None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.vocab_size, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.vocab_size})" + ), + ) + inputs_embeds = tf.gather(params=self.weight, indices=input_ids) + + input_shape = shape_list(inputs_embeds)[:-1] + + if token_type_ids is None: + token_type_ids = tf.fill(dims=input_shape, value=0) + + if position_ids is None: + if input_ids is not None: + # Create the position ids from the input token ids. Any padded tokens remain padded. + position_ids = self.create_position_ids_from_input_ids( + input_ids=input_ids, past_key_values_length=past_key_values_length + ) + else: + position_ids = tf.expand_dims( + tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1), axis=0 + ) + + position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) + token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) + final_embeddings = inputs_embeds + position_embeds + token_type_embeds + final_embeddings = self.LayerNorm(inputs=final_embeddings) + final_embeddings = self.dropout(inputs=final_embeddings, training=training) + + return final_embeddings + + +# Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->Camembert +class TFCamembertPooler(tf.keras.layers.Layer): + def __init__(self, config: CamembertConfig, **kwargs): + super().__init__(**kwargs) + + self.dense = tf.keras.layers.Dense( + units=config.hidden_size, + kernel_initializer=get_initializer(config.initializer_range), + activation="tanh", + name="dense", + ) + + def call(self, hidden_states: tf.Tensor) -> tf.Tensor: + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(inputs=first_token_tensor) + + return pooled_output + + +# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->Camembert +class TFCamembertSelfAttention(tf.keras.layers.Layer): + def __init__(self, config: CamembertConfig, **kwargs): + super().__init__(**kwargs) + + if config.hidden_size % config.num_attention_heads != 0: + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number " + f"of attention heads ({config.num_attention_heads})" + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + self.sqrt_att_head_size = math.sqrt(self.attention_head_size) + + self.query = tf.keras.layers.Dense( + units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" + ) + self.key = tf.keras.layers.Dense( + units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" + ) + self.value = tf.keras.layers.Dense( + units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" + ) + self.dropout = tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob) + + self.is_decoder = config.is_decoder + + def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor: + # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] + tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size)) + + # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size] + return tf.transpose(tensor, perm=[0, 2, 1, 3]) + + def call( + self, + hidden_states: tf.Tensor, + attention_mask: tf.Tensor, + head_mask: tf.Tensor, + encoder_hidden_states: tf.Tensor, + encoder_attention_mask: tf.Tensor, + past_key_value: Tuple[tf.Tensor], + output_attentions: bool, + training: bool = False, + ) -> Tuple[tf.Tensor]: + batch_size = shape_list(hidden_states)[0] + mixed_query_layer = self.query(inputs=hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. + is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_layer = past_key_value[0] + value_layer = past_key_value[1] + attention_mask = encoder_attention_mask + elif is_cross_attention: + key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size) + value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) + value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size) + key_layer = tf.concat([past_key_value[0], key_layer], axis=2) + value_layer = tf.concat([past_key_value[1], value_layer], axis=2) + else: + key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size) + value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size) + + query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) + + if self.is_decoder: + # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + # (batch size, num_heads, seq_len_q, seq_len_k) + attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True) + dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype) + attention_scores = tf.divide(attention_scores, dk) + + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in TFCamembertModel call() function) + attention_scores = tf.add(attention_scores, attention_mask) + + # Normalize the attention scores to probabilities. + attention_probs = stable_softmax(logits=attention_scores, axis=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(inputs=attention_probs, training=training) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = tf.multiply(attention_probs, head_mask) + + attention_output = tf.matmul(attention_probs, value_layer) + attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3]) + + # (batch_size, seq_len_q, all_head_size) + attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size)) + outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) + + if self.is_decoder: + outputs = outputs + (past_key_value,) + return outputs + + +# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->Camembert +class TFCamembertSelfOutput(tf.keras.layers.Layer): + def __init__(self, config: CamembertConfig, **kwargs): + super().__init__(**kwargs) + + self.dense = tf.keras.layers.Dense( + units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + ) + self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") + self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) + + def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: + hidden_states = self.dense(inputs=hidden_states) + hidden_states = self.dropout(inputs=hidden_states, training=training) + hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) + + return hidden_states + + +# Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention with Bert->Camembert +class TFCamembertAttention(tf.keras.layers.Layer): + def __init__(self, config: CamembertConfig, **kwargs): + super().__init__(**kwargs) + + self.self_attention = TFCamembertSelfAttention(config, name="self") + self.dense_output = TFCamembertSelfOutput(config, name="output") + + def prune_heads(self, heads): + raise NotImplementedError + + def call( + self, + input_tensor: tf.Tensor, + attention_mask: tf.Tensor, + head_mask: tf.Tensor, + encoder_hidden_states: tf.Tensor, + encoder_attention_mask: tf.Tensor, + past_key_value: Tuple[tf.Tensor], + output_attentions: bool, + training: bool = False, + ) -> Tuple[tf.Tensor]: + self_outputs = self.self_attention( + hidden_states=input_tensor, + attention_mask=attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_value=past_key_value, + output_attentions=output_attentions, + training=training, + ) + attention_output = self.dense_output( + hidden_states=self_outputs[0], input_tensor=input_tensor, training=training + ) + # add attentions (possibly with past_key_value) if we output them + outputs = (attention_output,) + self_outputs[1:] + + return outputs + + +# Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->Camembert +class TFCamembertIntermediate(tf.keras.layers.Layer): + def __init__(self, config: CamembertConfig, **kwargs): + super().__init__(**kwargs) + + self.dense = tf.keras.layers.Dense( + units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + ) + + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = get_tf_activation(config.hidden_act) + else: + self.intermediate_act_fn = config.hidden_act + + def call(self, hidden_states: tf.Tensor) -> tf.Tensor: + hidden_states = self.dense(inputs=hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + + return hidden_states + + +# Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->Camembert +class TFCamembertOutput(tf.keras.layers.Layer): + def __init__(self, config: CamembertConfig, **kwargs): + super().__init__(**kwargs) + + self.dense = tf.keras.layers.Dense( + units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + ) + self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") + self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) + + def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor: + hidden_states = self.dense(inputs=hidden_states) + hidden_states = self.dropout(inputs=hidden_states, training=training) + hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor) + + return hidden_states + + +# Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->Camembert +class TFCamembertLayer(tf.keras.layers.Layer): + def __init__(self, config: CamembertConfig, **kwargs): + super().__init__(**kwargs) + + self.attention = TFCamembertAttention(config, name="attention") + self.is_decoder = config.is_decoder + self.add_cross_attention = config.add_cross_attention + if self.add_cross_attention: + if not self.is_decoder: + raise ValueError(f"{self} should be used as a decoder model if cross attention is added") + self.crossattention = TFCamembertAttention(config, name="crossattention") + self.intermediate = TFCamembertIntermediate(config, name="intermediate") + self.bert_output = TFCamembertOutput(config, name="output") + + def call( + self, + hidden_states: tf.Tensor, + attention_mask: tf.Tensor, + head_mask: tf.Tensor, + encoder_hidden_states: Optional[tf.Tensor], + encoder_attention_mask: Optional[tf.Tensor], + past_key_value: Optional[Tuple[tf.Tensor]], + output_attentions: bool, + training: bool = False, + ) -> Tuple[tf.Tensor]: + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + self_attention_outputs = self.attention( + input_tensor=hidden_states, + attention_mask=attention_mask, + head_mask=head_mask, + encoder_hidden_states=None, + encoder_attention_mask=None, + past_key_value=self_attn_past_key_value, + output_attentions=output_attentions, + training=training, + ) + attention_output = self_attention_outputs[0] + + # if decoder, the last output is tuple of self-attn cache + if self.is_decoder: + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + else: + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + cross_attn_present_key_value = None + if self.is_decoder and encoder_hidden_states is not None: + if not hasattr(self, "crossattention"): + raise ValueError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" + " by setting `config.add_cross_attention=True`" + ) + + # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + cross_attention_outputs = self.crossattention( + input_tensor=attention_output, + attention_mask=attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_value=cross_attn_past_key_value, + output_attentions=output_attentions, + training=training, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights + + # add cross-attn cache to positions 3,4 of present_key_value tuple + cross_attn_present_key_value = cross_attention_outputs[-1] + present_key_value = present_key_value + cross_attn_present_key_value + + intermediate_output = self.intermediate(hidden_states=attention_output) + layer_output = self.bert_output( + hidden_states=intermediate_output, input_tensor=attention_output, training=training + ) + outputs = (layer_output,) + outputs # add attentions if we output them + + # if decoder, return the attn key/values as the last output + if self.is_decoder: + outputs = outputs + (present_key_value,) + + return outputs + + +# Copied from transformers.models.bert.modeling_tf_bert.TFBertEncoder with Bert->Camembert +class TFCamembertEncoder(tf.keras.layers.Layer): + def __init__(self, config: CamembertConfig, **kwargs): + super().__init__(**kwargs) + self.config = config + self.layer = [TFCamembertLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] + + def call( + self, + hidden_states: tf.Tensor, + attention_mask: tf.Tensor, + head_mask: tf.Tensor, + encoder_hidden_states: Optional[tf.Tensor], + encoder_attention_mask: Optional[tf.Tensor], + past_key_values: Optional[Tuple[Tuple[tf.Tensor]]], + use_cache: Optional[bool], + output_attentions: bool, + output_hidden_states: bool, + return_dict: bool, + training: bool = False, + ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: + all_hidden_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + + next_decoder_cache = () if use_cache else None + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + past_key_value = past_key_values[i] if past_key_values is not None else None + + layer_outputs = layer_module( + hidden_states=hidden_states, + attention_mask=attention_mask, + head_mask=head_mask[i], + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_value=past_key_value, + output_attentions=output_attentions, + training=training, + ) + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + if self.config.add_cross_attention and encoder_hidden_states is not None: + all_cross_attentions = all_cross_attentions + (layer_outputs[2],) + + # Add last layer + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None + ) + + return TFBaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_attentions, + cross_attentions=all_cross_attentions, + ) + + +@keras_serializable +# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaMainLayer with Roberta->Camembert +class TFCamembertMainLayer(tf.keras.layers.Layer): + config_class = CamembertConfig + + def __init__(self, config, add_pooling_layer=True, **kwargs): + super().__init__(**kwargs) + + self.config = config + self.is_decoder = config.is_decoder + + self.num_hidden_layers = config.num_hidden_layers + self.initializer_range = config.initializer_range + self.output_attentions = config.output_attentions + self.output_hidden_states = config.output_hidden_states + self.return_dict = config.use_return_dict + self.encoder = TFCamembertEncoder(config, name="encoder") + self.pooler = TFCamembertPooler(config, name="pooler") if add_pooling_layer else None + # The embeddings must be the last declaration in order to follow the weights order + self.embeddings = TFCamembertEmbeddings(config, name="embeddings") + + # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.get_input_embeddings + def get_input_embeddings(self) -> tf.keras.layers.Layer: + return self.embeddings + + # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.set_input_embeddings + def set_input_embeddings(self, value: tf.Variable): + self.embeddings.weight = value + self.embeddings.vocab_size = shape_list(value)[0] + + # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer._prune_heads + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + raise NotImplementedError + + @unpack_inputs + # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.call + def call( + self, + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, + encoder_hidden_states: Optional[Union[np.ndarray, tf.Tensor]] = None, + encoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[TFBaseModelOutputWithPoolingAndCrossAttentions, Tuple[tf.Tensor]]: + + if not self.config.is_decoder: + use_cache = False + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = shape_list(input_ids) + elif inputs_embeds is not None: + input_shape = shape_list(inputs_embeds)[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + batch_size, seq_length = input_shape + + if past_key_values is None: + past_key_values_length = 0 + past_key_values = [None] * len(self.encoder.layer) + else: + past_key_values_length = shape_list(past_key_values[0][0])[-2] + + if attention_mask is None: + attention_mask = tf.fill(dims=(batch_size, seq_length + past_key_values_length), value=1) + + if token_type_ids is None: + token_type_ids = tf.fill(dims=input_shape, value=0) + + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + past_key_values_length=past_key_values_length, + training=training, + ) + + # We create a 3D attention mask from a 2D tensor mask. + # Sizes are [batch_size, 1, 1, to_seq_length] + # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] + # this attention mask is more simple than the triangular masking of causal attention + # used in OpenAI GPT, we just need to prepare the broadcast dimension here. + attention_mask_shape = shape_list(attention_mask) + + mask_seq_length = seq_length + past_key_values_length + # Copied from `modeling_tf_t5.py` + # Provided a padding mask of dimensions [batch_size, mask_seq_length] + # - if the model is a decoder, apply a causal mask in addition to the padding mask + # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length] + if self.is_decoder: + seq_ids = tf.range(mask_seq_length) + causal_mask = tf.less_equal( + tf.tile(seq_ids[None, None, :], (batch_size, mask_seq_length, 1)), + seq_ids[None, :, None], + ) + causal_mask = tf.cast(causal_mask, dtype=attention_mask.dtype) + extended_attention_mask = causal_mask * attention_mask[:, None, :] + attention_mask_shape = shape_list(extended_attention_mask) + extended_attention_mask = tf.reshape( + extended_attention_mask, (attention_mask_shape[0], 1, attention_mask_shape[1], attention_mask_shape[2]) + ) + if past_key_values[0] is not None: + # attention_mask needs to be sliced to the shape `[batch_size, 1, from_seq_length - cached_seq_length, to_seq_length] + extended_attention_mask = extended_attention_mask[:, :, -seq_length:, :] + else: + extended_attention_mask = tf.reshape( + attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1]) + ) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) + one_cst = tf.constant(1.0, dtype=embedding_output.dtype) + ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) + extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) + + # Copied from `modeling_tf_t5.py` with -1e9 -> -10000 + if self.is_decoder and encoder_attention_mask is not None: + # If a 2D ou 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length] + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=extended_attention_mask.dtype) + num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask)) + if num_dims_encoder_attention_mask == 3: + encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] + if num_dims_encoder_attention_mask == 2: + encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] + + # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition + # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270 + # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask, + # tf.transpose(encoder_extended_attention_mask, perm=(-1, -2))) + + encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0 + else: + encoder_extended_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + if head_mask is not None: + raise NotImplementedError + else: + head_mask = [None] * self.config.num_hidden_layers + + encoder_outputs = self.encoder( + hidden_states=embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_extended_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + sequence_output = encoder_outputs[0] + pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None + + if not return_dict: + return ( + sequence_output, + pooled_output, + ) + encoder_outputs[1:] + + return TFBaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + past_key_values=encoder_outputs.past_key_values, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + +class TFCamembertPreTrainedModel(TFPreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = CamembertConfig + base_model_prefix = "roberta" + + @property + # Copied from transformers.models.bert.modeling_tf_bert.TFBertPreTrainedModel.dummy_inputs + def dummy_inputs(self): + """ + Dummy inputs to build the network. + + Returns: + `Dict[str, tf.Tensor]`: The dummy inputs. + """ + dummy = {"input_ids": tf.constant(DUMMY_INPUTS)} + # Add `encoder_hidden_states` to make the cross-attention layers' weights initialized + if self.config.add_cross_attention: + batch_size, seq_len = tf.constant(DUMMY_INPUTS).shape + shape = (batch_size, seq_len) + (self.config.hidden_size,) + h = tf.random.uniform(shape=shape) + dummy["encoder_hidden_states"] = h + + return dummy + + @tf.function( + input_signature=[ + { + "input_ids": tf.TensorSpec((None, None), tf.int64, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None), tf.int64, name="attention_mask"), + } + ] + ) + def serving(self, inputs): + output = self.call(inputs) + + return self.serving_output(output) + @add_start_docstrings( "The bare CamemBERT Model transformer outputting raw hidden-states without any specific head on top.", CAMEMBERT_START_DOCSTRING, ) -class TFCamembertModel(TFRobertaModel): - """ - This class overrides [`TFRobertaModel`]. Please check the superclass for the appropriate documentation alongside - usage examples. - """ +# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaModel with Roberta->Camembert, ROBERTA->CAMEMBERT +class TFCamembertModel(TFCamembertPreTrainedModel): + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + self.roberta = TFCamembertMainLayer(config, name="roberta") - config_class = CamembertConfig + @unpack_inputs + @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFBaseModelOutputWithPoolingAndCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def call( + self, + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, + encoder_hidden_states: Optional[Union[np.ndarray, tf.Tensor]] = None, + encoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: Optional[bool] = False, + ) -> Union[Tuple, TFBaseModelOutputWithPoolingAndCrossAttentions]: + r""" + encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) + contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*, defaults to `True`): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). Set to `False` during training, `True` during generation + """ + outputs = self.roberta( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + return outputs + + # Copied from transformers.models.bert.modeling_tf_bert.TFBertModel.serving_output + def serving_output( + self, output: TFBaseModelOutputWithPoolingAndCrossAttentions + ) -> TFBaseModelOutputWithPoolingAndCrossAttentions: + output_cache = self.config.use_cache and self.config.is_decoder + pkv = tf.convert_to_tensor(output.past_key_values) if output_cache else None + hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None + attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None + cross_attns = tf.convert_to_tensor(output.cross_attentions) if output.cross_attentions is not None else None + if not (self.config.output_attentions and self.config.add_cross_attention): + cross_attns = None + + return TFBaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=output.last_hidden_state, + pooler_output=output.pooler_output, + past_key_values=pkv, + hidden_states=hs, + attentions=attns, + cross_attentions=cross_attns, + ) + + +# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaLMHead with Roberta->Camembert +class TFCamembertLMHead(tf.keras.layers.Layer): + """Camembert Head for masked language modeling.""" + + def __init__(self, config, input_embeddings, **kwargs): + super().__init__(**kwargs) + + self.vocab_size = config.vocab_size + self.hidden_size = config.hidden_size + self.dense = tf.keras.layers.Dense( + config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + ) + self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") + self.act = get_tf_activation("gelu") + + # The output weights are the same as the input embeddings, but there is + # an output-only bias for each token. + self.decoder = input_embeddings + + def build(self, input_shape): + self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias") + + super().build(input_shape) + + def get_output_embeddings(self): + return self.decoder + + def set_output_embeddings(self, value): + self.decoder.weight = value + self.decoder.vocab_size = shape_list(value)[0] + + def get_bias(self): + return {"bias": self.bias} + + def set_bias(self, value): + self.bias = value["bias"] + self.vocab_size = shape_list(value["bias"])[0] + + def call(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.layer_norm(hidden_states) + + # project back to size of vocabulary with bias + seq_length = shape_list(tensor=hidden_states)[1] + hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size]) + hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, transpose_b=True) + hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size]) + hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) + + return hidden_states @add_start_docstrings( """CamemBERT Model with a `language modeling` head on top.""", CAMEMBERT_START_DOCSTRING, ) -class TFCamembertForMaskedLM(TFRobertaForMaskedLM): - """ - This class overrides [`TFRobertaForMaskedLM`]. Please check the superclass for the appropriate documentation - alongside usage examples. - """ +# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForMaskedLM with Roberta->Camembert, ROBERTA->CAMEMBERT +class TFCamembertForMaskedLM(TFCamembertPreTrainedModel, TFMaskedLanguageModelingLoss): + # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model + _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head.decoder.weight"] - config_class = CamembertConfig + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + + self.roberta = TFCamembertMainLayer(config, add_pooling_layer=False, name="roberta") + self.lm_head = TFCamembertLMHead(config, self.roberta.embeddings, name="lm_head") + + def get_lm_head(self): + return self.lm_head + + def get_prefix_bias_name(self): + warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) + return self.name + "/" + self.lm_head.name + + @unpack_inputs + @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFMaskedLMOutput, + config_class=_CONFIG_FOR_DOC, + mask="", + expected_output="' Paris'", + expected_loss=0.1, + ) + def call( + self, + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: Optional[Union[np.ndarray, tf.Tensor]] = None, + training: Optional[bool] = False, + ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]: + r""" + labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., + config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the + loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + """ + outputs = self.roberta( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + sequence_output = outputs[0] + prediction_scores = self.lm_head(sequence_output) + + loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores) + + if not return_dict: + output = (prediction_scores,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TFMaskedLMOutput( + loss=loss, + logits=prediction_scores, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMaskedLM.serving_output + def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput: + hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None + attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None + + return TFMaskedLMOutput(logits=output.logits, hidden_states=hs, attentions=attns) + + +# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaClassificationHead +class TFCamembertClassificationHead(tf.keras.layers.Layer): + """Head for sentence-level classification tasks.""" + + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + self.dense = tf.keras.layers.Dense( + config.hidden_size, + kernel_initializer=get_initializer(config.initializer_range), + activation="tanh", + name="dense", + ) + classifier_dropout = ( + config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob + ) + self.dropout = tf.keras.layers.Dropout(classifier_dropout) + self.out_proj = tf.keras.layers.Dense( + config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj" + ) + + def call(self, features, training=False): + x = features[:, 0, :] # take token (equiv. to [CLS]) + x = self.dropout(x, training=training) + x = self.dense(x) + x = self.dropout(x, training=training) + x = self.out_proj(x) + return x @add_start_docstrings( @@ -111,13 +1190,82 @@ class TFCamembertForMaskedLM(TFRobertaForMaskedLM): """, CAMEMBERT_START_DOCSTRING, ) -class TFCamembertForSequenceClassification(TFRobertaForSequenceClassification): - """ - This class overrides [`TFRobertaForSequenceClassification`]. Please check the superclass for the appropriate - documentation alongside usage examples. - """ +# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForSequenceClassification with Roberta->Camembert, ROBERTA->CAMEMBERT +class TFCamembertForSequenceClassification(TFCamembertPreTrainedModel, TFSequenceClassificationLoss): + # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model + _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"] - config_class = CamembertConfig + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + self.num_labels = config.num_labels + + self.roberta = TFCamembertMainLayer(config, add_pooling_layer=False, name="roberta") + self.classifier = TFCamembertClassificationHead(config, name="classifier") + + @unpack_inputs + @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint="cardiffnlp/twitter-roberta-base-emotion", + output_type=TFSequenceClassifierOutput, + config_class=_CONFIG_FOR_DOC, + expected_output="'optimism'", + expected_loss=0.08, + ) + def call( + self, + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: Optional[Union[np.ndarray, tf.Tensor]] = None, + training: Optional[bool] = False, + ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]: + r""" + labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + outputs = self.roberta( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + sequence_output = outputs[0] + logits = self.classifier(sequence_output, training=training) + + loss = None if labels is None else self.hf_compute_loss(labels, logits) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TFSequenceClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + # Copied from transformers.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output + def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput: + hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None + attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None + + return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns) @add_start_docstrings( @@ -127,13 +1275,89 @@ class TFCamembertForSequenceClassification(TFRobertaForSequenceClassification): """, CAMEMBERT_START_DOCSTRING, ) -class TFCamembertForTokenClassification(TFRobertaForTokenClassification): - """ - This class overrides [`TFRobertaForTokenClassification`]. Please check the superclass for the appropriate - documentation alongside usage examples. - """ +# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForTokenClassification with Roberta->Camembert, ROBERTA->CAMEMBERT +class TFCamembertForTokenClassification(TFCamembertPreTrainedModel, TFTokenClassificationLoss): + # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model + _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"] + _keys_to_ignore_on_load_missing = [r"dropout"] - config_class = CamembertConfig + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + self.num_labels = config.num_labels + + self.roberta = TFCamembertMainLayer(config, add_pooling_layer=False, name="roberta") + classifier_dropout = ( + config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob + ) + self.dropout = tf.keras.layers.Dropout(classifier_dropout) + self.classifier = tf.keras.layers.Dense( + config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" + ) + + @unpack_inputs + @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint="ydshieh/roberta-large-ner-english", + output_type=TFTokenClassifierOutput, + config_class=_CONFIG_FOR_DOC, + expected_output="['O', 'ORG', 'ORG', 'O', 'O', 'O', 'O', 'O', 'LOC', 'O', 'LOC', 'LOC']", + expected_loss=0.01, + ) + def call( + self, + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: Optional[Union[np.ndarray, tf.Tensor]] = None, + training: Optional[bool] = False, + ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]: + r""" + labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. + """ + outputs = self.roberta( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + sequence_output = outputs[0] + + sequence_output = self.dropout(sequence_output, training=training) + logits = self.classifier(sequence_output) + + loss = None if labels is None else self.hf_compute_loss(labels, logits) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TFTokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + # Copied from transformers.models.bert.modeling_tf_bert.TFBertForTokenClassification.serving_output + def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput: + hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None + attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None + + return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns) @add_start_docstrings( @@ -143,13 +1367,121 @@ class TFCamembertForTokenClassification(TFRobertaForTokenClassification): """, CAMEMBERT_START_DOCSTRING, ) -class TFCamembertForMultipleChoice(TFRobertaForMultipleChoice): - """ - This class overrides [`TFRobertaForMultipleChoice`]. Please check the superclass for the appropriate documentation - alongside usage examples. - """ +# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForMultipleChoice with Roberta->Camembert, ROBERTA->CAMEMBERT +class TFCamembertForMultipleChoice(TFCamembertPreTrainedModel, TFMultipleChoiceLoss): + # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model + _keys_to_ignore_on_load_unexpected = [r"lm_head"] + _keys_to_ignore_on_load_missing = [r"dropout"] - config_class = CamembertConfig + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + + self.roberta = TFCamembertMainLayer(config, name="roberta") + self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) + self.classifier = tf.keras.layers.Dense( + 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier" + ) + + @property + def dummy_inputs(self): + """ + Dummy inputs to build the network. + + Returns: + tf.Tensor with dummy inputs + """ + return {"input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS)} + + @unpack_inputs + @add_start_docstrings_to_model_forward( + CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") + ) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFMultipleChoiceModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def call( + self, + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: Optional[Union[np.ndarray, tf.Tensor]] = None, + training: Optional[bool] = False, + ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]: + r""" + labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): + Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]` + where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) + """ + + if input_ids is not None: + num_choices = shape_list(input_ids)[1] + seq_length = shape_list(input_ids)[2] + else: + num_choices = shape_list(inputs_embeds)[1] + seq_length = shape_list(inputs_embeds)[2] + + flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None + flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None + flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None + flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None + outputs = self.roberta( + flat_input_ids, + flat_attention_mask, + flat_token_type_ids, + flat_position_ids, + head_mask, + inputs_embeds, + output_attentions, + output_hidden_states, + return_dict=return_dict, + training=training, + ) + pooled_output = outputs[1] + pooled_output = self.dropout(pooled_output, training=training) + logits = self.classifier(pooled_output) + reshaped_logits = tf.reshape(logits, (-1, num_choices)) + + loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits) + + if not return_dict: + output = (reshaped_logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TFMultipleChoiceModelOutput( + loss=loss, + logits=reshaped_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + @tf.function( + input_signature=[ + { + "input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"), + "attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"), + } + ] + ) + def serving(self, inputs): + output = self.call(inputs) + + return self.serving_output(output) + + # Copied from transformers.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving_output + def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput: + hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None + attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None + + return TFMultipleChoiceModelOutput(logits=output.logits, hidden_states=hs, attentions=attns) @add_start_docstrings( @@ -159,22 +1491,246 @@ class TFCamembertForMultipleChoice(TFRobertaForMultipleChoice): """, CAMEMBERT_START_DOCSTRING, ) -class TFCamembertForQuestionAnswering(TFRobertaForQuestionAnswering): - """ - This class overrides [`TFRobertaForQuestionAnswering`]. Please check the superclass for the appropriate - documentation alongside usage examples. - """ +# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForQuestionAnswering with Roberta->Camembert, ROBERTA->CAMEMBERT +class TFCamembertForQuestionAnswering(TFCamembertPreTrainedModel, TFQuestionAnsweringLoss): + # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model + _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head"] - config_class = CamembertConfig + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + self.num_labels = config.num_labels + + self.roberta = TFCamembertMainLayer(config, add_pooling_layer=False, name="roberta") + self.qa_outputs = tf.keras.layers.Dense( + config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs" + ) + + @unpack_inputs + @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint="ydshieh/roberta-base-squad2", + output_type=TFQuestionAnsweringModelOutput, + config_class=_CONFIG_FOR_DOC, + expected_output="' puppet'", + expected_loss=0.86, + ) + def call( + self, + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + start_positions: Optional[Union[np.ndarray, tf.Tensor]] = None, + end_positions: Optional[Union[np.ndarray, tf.Tensor]] = None, + training: Optional[bool] = False, + ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]: + r""" + start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + """ + outputs = self.roberta( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = tf.split(logits, 2, axis=-1) + start_logits = tf.squeeze(start_logits, axis=-1) + end_logits = tf.squeeze(end_logits, axis=-1) + + loss = None + if start_positions is not None and end_positions is not None: + labels = {"start_position": start_positions} + labels["end_position"] = end_positions + loss = self.hf_compute_loss(labels, (start_logits, end_logits)) + + if not return_dict: + output = (start_logits, end_logits) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TFQuestionAnsweringModelOutput( + loss=loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + # Copied from transformers.models.bert.modeling_tf_bert.TFBertForQuestionAnswering.serving_output + def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput: + hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None + attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None + + return TFQuestionAnsweringModelOutput( + start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns + ) @add_start_docstrings( """CamemBERT Model with a `language modeling` head on top for CLM fine-tuning.""", CAMEMBERT_START_DOCSTRING ) -class TFCamembertForCausalLM(TFRobertaForCausalLM): - """ - This class overrides [`TFRobertaForCausalLM`]. Please check the superclass for the appropriate documentation - alongside usage examples. - """ +# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaForCausalLM with Roberta->Camembert, ROBERTA->CAMEMBERT +class TFCamembertForCausalLM(TFCamembertPreTrainedModel, TFCausalLanguageModelingLoss): + # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model + _keys_to_ignore_on_load_unexpected = [r"pooler", r"lm_head.decoder.weight"] - config_class = CamembertConfig + def __init__(self, config: CamembertConfig, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + + if not config.is_decoder: + logger.warning("If you want to use `TFCamembertLMHeadModel` as a standalone, add `is_decoder=True.`") + + self.roberta = TFCamembertMainLayer(config, add_pooling_layer=False, name="roberta") + self.lm_head = TFCamembertLMHead(config, input_embeddings=self.roberta.embeddings, name="lm_head") + + def get_lm_head(self): + return self.lm_head + + def get_prefix_bias_name(self): + warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) + return self.name + "/" + self.lm_head.name + + # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMHeadModel.prepare_inputs_for_generation + def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs): + input_shape = input_ids.shape + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = tf.ones(input_shape) + + # cut decoder_input_ids if past is used + if past is not None: + input_ids = input_ids[:, -1:] + + return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past} + + @unpack_inputs + @add_start_docstrings_to_model_forward(CAMEMBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFCausalLMOutputWithCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def call( + self, + input_ids: Optional[TFModelInputType] = None, + attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, + head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, + encoder_hidden_states: Optional[Union[np.ndarray, tf.Tensor]] = None, + encoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, + past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: Optional[Union[np.ndarray, tf.Tensor]] = None, + training: Optional[bool] = False, + ) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]: + r""" + encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) + contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*, defaults to `True`): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). Set to `False` during training, `True` during generation + labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the cross entropy classification loss. Indices should be in `[0, ..., + config.vocab_size - 1]`. + """ + outputs = self.roberta( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + past_key_values=past_key_values, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + sequence_output = outputs[0] + logits = self.lm_head(hidden_states=sequence_output, training=training) + loss = None + + if labels is not None: + # shift labels to the left and cut last logit token + shifted_logits = logits[:, :-1] + labels = labels[:, 1:] + loss = self.hf_compute_loss(labels=labels, logits=shifted_logits) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TFCausalLMOutputWithCrossAttentions( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMHeadModel.serving_output + def serving_output(self, output: TFCausalLMOutputWithCrossAttentions) -> TFCausalLMOutputWithCrossAttentions: + output_cache = self.config.use_cache and self.config.is_decoder + pkv = tf.convert_to_tensor(output.past_key_values) if output_cache else None + hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None + attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None + cross_attns = tf.convert_to_tensor(output.cross_attentions) if output.cross_attentions is not None else None + if not (self.config.output_attentions and self.config.add_cross_attention): + cross_attns = None + + return TFCausalLMOutputWithCrossAttentions( + logits=output.logits, past_key_values=pkv, hidden_states=hs, attentions=attns, cross_attentions=cross_attns + ) + + @staticmethod + # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMHeadModel._reorder_cache + def _reorder_cache(past, beam_idx): + reordered_past = () + for layer_past in past: + reordered_past += (tuple(tf.gather(past_state, beam_idx, axis=0) for past_state in layer_past),) + return reordered_past diff --git a/src/transformers/utils/dummy_tf_objects.py b/src/transformers/utils/dummy_tf_objects.py index 3acc7804687df7..37075076f9c802 100644 --- a/src/transformers/utils/dummy_tf_objects.py +++ b/src/transformers/utils/dummy_tf_objects.py @@ -637,6 +637,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) +class TFCamembertPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = None From de4d71ea07b31c1bcef7ffccc3691f76658e291f Mon Sep 17 00:00:00 2001 From: Vishwas Date: Fri, 7 Oct 2022 23:15:24 +0530 Subject: [PATCH 501/539] Removed Bert dependency from BertGeneration code base. (#19370) * Copied all the code required from transformers.models.bert.modeling_bert to here * Fixed styling issues * Reformatted copied names with Model specific name. * Reverted BertEncoder part as there is already a class called BertGenerationEncoder * Added prefixes in missing places. Co-authored-by: vishwaspai --- .../modeling_bert_generation.py | 413 +++++++++++++++++- 1 file changed, 412 insertions(+), 1 deletion(-) diff --git a/src/transformers/models/bert_generation/modeling_bert_generation.py b/src/transformers/models/bert_generation/modeling_bert_generation.py index 986217068b1c1b..3fc06450afb867 100755 --- a/src/transformers/models/bert_generation/modeling_bert_generation.py +++ b/src/transformers/models/bert_generation/modeling_bert_generation.py @@ -14,6 +14,7 @@ # limitations under the License. """PyTorch BERT model specific for generation.""" +import math from typing import Optional, Tuple, Union import torch @@ -21,8 +22,10 @@ from torch import nn from torch.nn import CrossEntropyLoss +from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( add_code_sample_docstrings, add_start_docstrings, @@ -30,7 +33,6 @@ logging, replace_return_docstrings, ) -from ..bert.modeling_bert import BertEncoder from .configuration_bert_generation import BertGenerationConfig @@ -41,6 +43,415 @@ _TOKENIZER_FOR_DOC = "BertGenerationTokenizer" +# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->BertGeneration +class BertGenerationSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->BertGeneration +class BertGenerationSelfAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads})" + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = position_embedding_type or getattr( + config, "position_embedding_type", "absolute" + ) + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) + + self.is_decoder = config.is_decoder + + def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. + is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_layer = past_key_value[0] + value_layer = past_key_value[1] + attention_mask = encoder_attention_mask + elif is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + seq_length = hidden_states.size()[1] + position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in BertGenerationModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + if self.is_decoder: + outputs = outputs + (past_key_value,) + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->BertGeneration +class BertGenerationAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + self.self = BertGenerationSelfAttention(config, position_embedding_type=position_embedding_type) + self.output = BertGenerationSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->BertGeneration +class BertGenerationIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->BertGeneration +class BertGenerationOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->BertGeneration +class BertGenerationLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = BertGenerationAttention(config) + self.is_decoder = config.is_decoder + self.add_cross_attention = config.add_cross_attention + if self.add_cross_attention: + if not self.is_decoder: + raise ValueError(f"{self} should be used as a decoder model if cross attention is added") + self.crossattention = BertGenerationAttention(config, position_embedding_type="absolute") + self.intermediate = BertGenerationIntermediate(config) + self.output = BertGenerationOutput(config) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + + # if decoder, the last output is tuple of self-attn cache + if self.is_decoder: + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + else: + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + cross_attn_present_key_value = None + if self.is_decoder and encoder_hidden_states is not None: + if not hasattr(self, "crossattention"): + raise ValueError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" + " by setting `config.add_cross_attention=True`" + ) + + # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + cross_attn_past_key_value, + output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights + + # add cross-attn cache to positions 3,4 of present_key_value tuple + cross_attn_present_key_value = cross_attention_outputs[-1] + present_key_value = present_key_value + cross_attn_present_key_value + + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output + ) + outputs = (layer_output,) + outputs + + # if decoder, return the attn key/values as the last output + if self.is_decoder: + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->BertGeneration +class BertEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList([BertGenerationLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = False, + output_hidden_states: Optional[bool] = False, + return_dict: Optional[bool] = True, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + + next_decoder_cache = () if use_cache else None + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + if use_cache: + logger.warning( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, past_key_value, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + if self.config.add_cross_attention: + all_cross_attentions = all_cross_attentions + (layer_outputs[2],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + def load_tf_weights_in_bert_generation( model, tf_hub_path, model_class, is_encoder_named_decoder=False, is_encoder=False ): From 983451a13e5378908866309647afc94761fa8572 Mon Sep 17 00:00:00 2001 From: Alara Dirik <8944735+alaradirik@users.noreply.github.com> Date: Fri, 7 Oct 2022 23:34:41 +0300 Subject: [PATCH 502/539] Improve and fix ImageSegmentationPipeline (#19367) - Fixes the image segmentation pipeline test failures caused by changes to the postprocessing methods of supported models - Updates the ImageSegmentationPipeline tests - Improves docs, adds 'task' argument to optionally perform semantic, instance or panoptic segmentation --- .../pipelines/image_segmentation.py | 140 +++++++++--------- .../test_pipelines_image_segmentation.py | 114 +++++++++----- 2 files changed, 144 insertions(+), 110 deletions(-) diff --git a/src/transformers/pipelines/image_segmentation.py b/src/transformers/pipelines/image_segmentation.py index a33095cfc24ea1..690247f6e49b95 100644 --- a/src/transformers/pipelines/image_segmentation.py +++ b/src/transformers/pipelines/image_segmentation.py @@ -12,9 +12,6 @@ from ..image_utils import load_image if is_torch_available(): - import torch - from torch import nn - from ..models.auto.modeling_auto import ( MODEL_FOR_IMAGE_SEGMENTATION_MAPPING, MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING, @@ -59,13 +56,15 @@ def __init__(self, *args, **kwargs): def _sanitize_parameters(self, **kwargs): postprocess_kwargs = {} + if "task" in kwargs: + postprocess_kwargs["task"] = kwargs["task"] if "threshold" in kwargs: postprocess_kwargs["threshold"] = kwargs["threshold"] - if "mask_threshold" in kwargs: - postprocess_kwargs["mask_threshold"] = kwargs["mask_threshold"] + if "overlap_mask_area_threshold" in kwargs: + postprocess_kwargs["overlap_mask_area_threshold"] = kwargs["overlap_mask_area_threshold"] return {}, {}, postprocess_kwargs - def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]: + def __call__(self, images, **kwargs) -> Union[Predictions, List[Prediction]]: """ Perform segmentation (detect masks & classes) in the image(s) passed as inputs. @@ -79,30 +78,34 @@ def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]: The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the same format: all as HTTP(S) links, all as local paths, or all as PIL images. + task (`str`, defaults to `semantic`): + Segmentation task to be performed, choose [`semantic`, `instance` and `panoptic`] depending on model + capabilities. threshold (`float`, *optional*, defaults to 0.9): - The probability necessary to make a prediction. - mask_threshold (`float`, *optional*, defaults to 0.5): - Threshold to use when turning the predicted masks into binary values. + Probability threshold to filter out predicted masks. + overlap_mask_area_threshold (`float`, *optional*, defaults to 0.5): + Mask overlap threshold to eliminate small, disconnected segments. Return: A dictionary or a list of dictionaries containing the result. If the input is a single image, will return a list of dictionaries, if the input is a list of several images, will return a list of list of dictionaries corresponding to each image. - The dictionaries contain the following keys: + The dictionaries contain the mask, label and score (where applicable) of each detected object and contains + the following keys: - **label** (`str`) -- The class label identified by the model. - - **mask** (`PIL.Image`) -- Pil Image with size (heigth, width) of the original image. Pixel values in the - image are in the range 0-255. 0 means the pixel is *not* part of the *label*, 255 means it definitely is. + - **mask** (`PIL.Image`) -- A binary mask of the detected object as a Pil Image of shape (width, height) of + the original image. Returns a mask filled with zeros if no object is found. - **score** (*optional* `float`) -- Optionally, when the model is capable of estimating a confidence of the "object" described by the label and the mask. """ - return super().__call__(*args, **kwargs) + return super().__call__(images, **kwargs) def preprocess(self, image): image = load_image(image) - target_size = torch.IntTensor([[image.height, image.width]]) + target_size = [(image.height, image.width)] inputs = self.feature_extractor(images=[image], return_tensors="pt") inputs["target_size"] = target_size return inputs @@ -113,66 +116,65 @@ def _forward(self, model_inputs): model_outputs["target_size"] = target_size return model_outputs - def postprocess(self, model_outputs, raw_image=False, threshold=0.9, mask_threshold=0.5): - if hasattr(self.feature_extractor, "post_process_panoptic_segmentation"): + def postprocess(self, model_outputs, task="semantic", threshold=0.9, overlap_mask_area_threshold=0.5): + if task == "instance" and hasattr(self.feature_extractor, "post_process_instance_segmentation"): outputs = self.feature_extractor.post_process_panoptic_segmentation( - model_outputs, object_mask_threshold=threshold + model_outputs, + threshold=threshold, + overlap_mask_area_threshold=overlap_mask_area_threshold, + target_sizes=model_outputs["target_size"], )[0] + annotation = [] segmentation = outputs["segmentation"] - for segment in outputs["segments"]: - mask = (segmentation == segment["id"]) * 255 - mask = Image.fromarray(mask.numpy().astype(np.uint8), mode="L") - label = self.model.config.id2label[segment["label_id"]] - annotation.append({"mask": mask, "label": label, "score": None}) - elif hasattr(self.feature_extractor, "post_process_segmentation"): - # Panoptic - raw_annotations = self.feature_extractor.post_process_segmentation( - model_outputs, model_outputs["target_size"], threshold=threshold, mask_threshold=0.5 - ) - raw_annotation = raw_annotations[0] - raw_annotation["masks"] *= 255 # [0,1] -> [0,255] black and white pixels - raw_annotation["scores"] = raw_annotation["scores"].tolist() - raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in raw_annotation["labels"]] - raw_annotation["masks"] = [ - Image.fromarray(mask.numpy().astype(np.uint8), mode="L") for mask in raw_annotation["masks"] - ] - # {"scores": [...], ...} --> [{"score":x, ...}, ...] - keys = ["score", "label", "mask"] - annotation = [ - dict(zip(keys, vals)) - for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["masks"]) - ] - else: - # Default logits - logits = model_outputs.logits - logits = logits.softmax(dim=1) - if len(logits.shape) != 4: - raise ValueError(f"Logits don't have expected dimensions, expected [1, N, H, W], got {logits.shape}") - batch_size, num_labels, height, width = logits.shape - expected_num_labels = len(self.model.config.id2label) - if num_labels != expected_num_labels: - raise ValueError( - f"Logits don't have expected dimensions, expected [1, {num_labels}, H, W], got {logits.shape}" - ) - size = model_outputs["target_size"].squeeze(0).tolist() - logits_reshaped = nn.functional.interpolate(logits, size=size, mode="bilinear", align_corners=False) - classes = logits_reshaped.argmax(dim=1)[0] + + if len(outputs["segments_info"]) == 0: + mask = Image.fromarray(np.zeros(segmentation.shape).astype(np.uint8), mode="L") + annotation.append({"mask": mask, "label": None, "score": 0.0}) + else: + for segment in outputs["segments_info"]: + mask = (segmentation == segment["id"]) * 255 + mask = Image.fromarray(mask.numpy().astype(np.uint8), mode="L") + label = self.model.config.id2label[segment["label_id"]] + score = segment["score"] + annotation.append({"mask": mask, "label": label, "score": score}) + + elif task == "panoptic" and hasattr(self.feature_extractor, "post_process_panoptic_segmentation"): + outputs = self.feature_extractor.post_process_panoptic_segmentation( + model_outputs, + threshold=threshold, + overlap_mask_area_threshold=overlap_mask_area_threshold, + target_sizes=model_outputs["target_size"], + )[0] + annotation = [] + segmentation = outputs["segmentation"] + + if len(outputs["segments_info"]) == 0: + mask = Image.fromarray(np.zeros(segmentation.shape).astype(np.uint8), mode="L") + annotation.append({"mask": mask, "label": None, "score": 0.0}) + else: + for segment in outputs["segments_info"]: + mask = (segmentation == segment["id"]) * 255 + mask = Image.fromarray(mask.numpy().astype(np.uint8), mode="L") + label = self.model.config.id2label[segment["label_id"]] + score = segment["score"] + annotation.append({"score": score, "label": label, "mask": mask}) + + elif task == "semantic" and hasattr(self.feature_extractor, "post_process_semantic_segmentation"): + outputs = self.feature_extractor.post_process_semantic_segmentation( + model_outputs, target_sizes=model_outputs["target_size"] + )[0] - for label_id in range(num_labels): - label = self.model.config.id2label[label_id] - mask = classes == label_id - mask_sum = mask.sum() - - # Remove empty masks. - if mask_sum == 0: - continue - mask = Image.fromarray((mask * 255).numpy().astype(np.uint8), mode="L") - # Semantic segmentation does not output a global score for the mask - # so we don't attempt to compute one. - # XXX: We could send a mask with values between 0 and 255 instead - # of a pure mask to enable users to get the probabilities that - # are really outputted by the logits. + annotation = [] + segmentation = outputs.numpy() + labels = np.unique(segmentation) + + for label in labels: + mask = (segmentation == label) * 255 + mask = Image.fromarray(mask, mode="L") + label = self.model.config.id2label[label] annotation.append({"score": None, "label": label, "mask": mask}) + else: + raise ValueError(f"task {task} is not supported for model {self.model}") return annotation diff --git a/tests/pipelines/test_pipelines_image_segmentation.py b/tests/pipelines/test_pipelines_image_segmentation.py index 3841bc1ab78764..65656939d0cce4 100644 --- a/tests/pipelines/test_pipelines_image_segmentation.py +++ b/tests/pipelines/test_pipelines_image_segmentation.py @@ -74,9 +74,6 @@ class ImageSegmentationPipelineTests(unittest.TestCase, metaclass=PipelineTestCa } def get_test_pipeline(self, model, tokenizer, feature_extractor): - # Fix me Alara - if model.__class__.__name__ in ["DetrForSegmentation", "MaskFormerForInstanceSegmentation"]: - return None, None image_segmenter = ImageSegmentationPipeline(model=model, feature_extractor=feature_extractor) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", @@ -150,7 +147,7 @@ def test_small_model_tf(self): pass @require_torch - @unittest.skip("Fix me Alara!") + @unittest.skip("No weights found for hf-internal-testing/tiny-detr-mobilenetsv3-panoptic") def test_small_model_pt(self): model_id = "hf-internal-testing/tiny-detr-mobilenetsv3-panoptic" @@ -158,9 +155,15 @@ def test_small_model_pt(self): feature_extractor = AutoFeatureExtractor.from_pretrained(model_id) image_segmenter = ImageSegmentationPipeline(model=model, feature_extractor=feature_extractor) - outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0) + outputs = image_segmenter( + "http://images.cocodataset.org/val2017/000000039769.jpg", + task="panoptic", + threshold=0.0, + overlap_mask_area_threshold=0.0, + ) + + # Shortening by hashing for o in outputs: - # shortening by hashing o["mask"] = hashimage(o["mask"]) self.assertEqual( @@ -235,12 +238,12 @@ def test_small_model_pt_semantic(self): { "score": None, "label": "LABEL_0", - "mask": "6225140faf502d272af076222776d7e4", + "mask": "775518a7ed09eea888752176c6ba8f38", }, { "score": None, "label": "LABEL_1", - "mask": "8297c9f8eb43ddd3f32a6dae21e015a1", + "mask": "a12da23a46848128af68c63aa8ba7a02", }, ], ) @@ -249,22 +252,28 @@ def test_small_model_pt_semantic(self): @slow def test_integration_torch_image_segmentation(self): model_id = "facebook/detr-resnet-50-panoptic" - image_segmenter = pipeline("image-segmentation", model=model_id) - outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg") + outputs = image_segmenter( + "http://images.cocodataset.org/val2017/000000039769.jpg", + task="panoptic", + threshold=0, + overlap_mask_area_threshold=0.0, + ) + + # Shortening by hashing for o in outputs: o["mask"] = hashimage(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ - {"score": 0.9094, "label": "blanket", "mask": "6500201749480f87154fd967783b2b97"}, - {"score": 0.9941, "label": "cat", "mask": "f3a7f80220788acc0245ebc084df6afc"}, - {"score": 0.9987, "label": "remote", "mask": "7703408f54da1d0ebda47841da875e48"}, - {"score": 0.9995, "label": "remote", "mask": "bd726918f10fed3efaef0091e11f923b"}, - {"score": 0.9722, "label": "couch", "mask": "226d6dcb98bebc3fbc208abdc0c83196"}, - {"score": 0.9994, "label": "cat", "mask": "fa5d8d5c329546ba5339f3095641ef56"}, + {"score": 0.9094, "label": "blanket", "mask": "dcff19a97abd8bd555e21186ae7c066a"}, + {"score": 0.9941, "label": "cat", "mask": "9c0af87bd00f9d3a4e0c8888e34e70e2"}, + {"score": 0.9987, "label": "remote", "mask": "c7870600d6c02a1f6d96470fc7220e8e"}, + {"score": 0.9995, "label": "remote", "mask": "ef899a25fd44ec056c653f0ca2954fdd"}, + {"score": 0.9722, "label": "couch", "mask": "37b8446ac578a17108aa2b7fccc33114"}, + {"score": 0.9994, "label": "cat", "mask": "6a09d3655efd8a388ab4511e4cbbb797"}, ], ) @@ -273,8 +282,12 @@ def test_integration_torch_image_segmentation(self): "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], + task="panoptic", threshold=0.0, + overlap_mask_area_threshold=0.0, ) + + # Shortening by hashing for output in outputs: for o in output: o["mask"] = hashimage(o["mask"]) @@ -283,20 +296,20 @@ def test_integration_torch_image_segmentation(self): nested_simplify(outputs, decimals=4), [ [ - {"score": 0.9094, "label": "blanket", "mask": "6500201749480f87154fd967783b2b97"}, - {"score": 0.9941, "label": "cat", "mask": "f3a7f80220788acc0245ebc084df6afc"}, - {"score": 0.9987, "label": "remote", "mask": "7703408f54da1d0ebda47841da875e48"}, - {"score": 0.9995, "label": "remote", "mask": "bd726918f10fed3efaef0091e11f923b"}, - {"score": 0.9722, "label": "couch", "mask": "226d6dcb98bebc3fbc208abdc0c83196"}, - {"score": 0.9994, "label": "cat", "mask": "fa5d8d5c329546ba5339f3095641ef56"}, + {"score": 0.9094, "label": "blanket", "mask": "dcff19a97abd8bd555e21186ae7c066a"}, + {"score": 0.9941, "label": "cat", "mask": "9c0af87bd00f9d3a4e0c8888e34e70e2"}, + {"score": 0.9987, "label": "remote", "mask": "c7870600d6c02a1f6d96470fc7220e8e"}, + {"score": 0.9995, "label": "remote", "mask": "ef899a25fd44ec056c653f0ca2954fdd"}, + {"score": 0.9722, "label": "couch", "mask": "37b8446ac578a17108aa2b7fccc33114"}, + {"score": 0.9994, "label": "cat", "mask": "6a09d3655efd8a388ab4511e4cbbb797"}, ], [ - {"score": 0.9094, "label": "blanket", "mask": "6500201749480f87154fd967783b2b97"}, - {"score": 0.9941, "label": "cat", "mask": "f3a7f80220788acc0245ebc084df6afc"}, - {"score": 0.9987, "label": "remote", "mask": "7703408f54da1d0ebda47841da875e48"}, - {"score": 0.9995, "label": "remote", "mask": "bd726918f10fed3efaef0091e11f923b"}, - {"score": 0.9722, "label": "couch", "mask": "226d6dcb98bebc3fbc208abdc0c83196"}, - {"score": 0.9994, "label": "cat", "mask": "fa5d8d5c329546ba5339f3095641ef56"}, + {"score": 0.9094, "label": "blanket", "mask": "dcff19a97abd8bd555e21186ae7c066a"}, + {"score": 0.9941, "label": "cat", "mask": "9c0af87bd00f9d3a4e0c8888e34e70e2"}, + {"score": 0.9987, "label": "remote", "mask": "c7870600d6c02a1f6d96470fc7220e8e"}, + {"score": 0.9995, "label": "remote", "mask": "ef899a25fd44ec056c653f0ca2954fdd"}, + {"score": 0.9722, "label": "couch", "mask": "37b8446ac578a17108aa2b7fccc33114"}, + {"score": 0.9994, "label": "cat", "mask": "6a09d3655efd8a388ab4511e4cbbb797"}, ], ], ) @@ -304,12 +317,27 @@ def test_integration_torch_image_segmentation(self): @require_torch @slow def test_threshold(self): - threshold = 0.999 model_id = "facebook/detr-resnet-50-panoptic" - image_segmenter = pipeline("image-segmentation", model=model_id) - outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold) + outputs = image_segmenter( + "http://images.cocodataset.org/val2017/000000039769.jpg", task="panoptic", threshold=0.999 + ) + # Shortening by hashing + for o in outputs: + o["mask"] = hashimage(o["mask"]) + + self.assertEqual( + nested_simplify(outputs, decimals=4), + [ + {"score": 0.9995, "label": "remote", "mask": "d02404f5789f075e3b3174adbc3fd5b8"}, + {"score": 0.9994, "label": "cat", "mask": "eaa115b40c96d3a6f4fe498963a7e470"}, + ], + ) + + outputs = image_segmenter( + "http://images.cocodataset.org/val2017/000000039769.jpg", task="panoptic", threshold=0.5 + ) for o in outputs: o["mask"] = hashimage(o["mask"]) @@ -317,8 +345,11 @@ def test_threshold(self): self.assertEqual( nested_simplify(outputs, decimals=4), [ - {"score": 0.9995, "label": "remote", "mask": "bd726918f10fed3efaef0091e11f923b"}, - {"score": 0.9994, "label": "cat", "mask": "fa5d8d5c329546ba5339f3095641ef56"}, + {"score": 0.9941, "label": "cat", "mask": "9c0af87bd00f9d3a4e0c8888e34e70e2"}, + {"score": 0.9987, "label": "remote", "mask": "c7870600d6c02a1f6d96470fc7220e8e"}, + {"score": 0.9995, "label": "remote", "mask": "ef899a25fd44ec056c653f0ca2954fdd"}, + {"score": 0.9722, "label": "couch", "mask": "37b8446ac578a17108aa2b7fccc33114"}, + {"score": 0.9994, "label": "cat", "mask": "6a09d3655efd8a388ab4511e4cbbb797"}, ], ) @@ -335,20 +366,21 @@ def test_maskformer(self): image = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") file = image[0]["file"] - outputs = image_segmenter(file, threshold=threshold) + outputs = image_segmenter(file, task="panoptic", threshold=threshold) + # Shortening by hashing for o in outputs: o["mask"] = hashimage(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ - {"mask": "20d1b9480d1dc1501dbdcfdff483e370", "label": "wall", "score": None}, - {"mask": "0f902fbc66a0ff711ea455b0e4943adf", "label": "house", "score": None}, - {"mask": "4537bdc07d47d84b3f8634b7ada37bd4", "label": "grass", "score": None}, - {"mask": "b7ac77dfae44a904b479a0926a2acaf7", "label": "tree", "score": None}, - {"mask": "e9bedd56bd40650fb263ce03eb621079", "label": "plant", "score": None}, - {"mask": "37a609f8c9c1b8db91fbff269f428b20", "label": "road, route", "score": None}, - {"mask": "0d8cdfd63bae8bf6e4344d460a2fa711", "label": "sky", "score": None}, + {"score": 0.9974, "label": "wall", "mask": "a547b7c062917f4f3e36501827ad3cd6"}, + {"score": 0.949, "label": "house", "mask": "0da9b7b38feac47bd2528a63e5ea7b19"}, + {"score": 0.9995, "label": "grass", "mask": "1d07ea0a263dcf38ca8ae1a15fdceda1"}, + {"score": 0.9976, "label": "tree", "mask": "6cdc97c7daf1dc596fa181f461ddd2ba"}, + {"score": 0.8239, "label": "plant", "mask": "1ab4ce378f6ceff57d428055cfbd742f"}, + {"score": 0.9942, "label": "road, route", "mask": "39c5d17be53b2d1b0f46aad8ebb15813"}, + {"score": 1.0, "label": "sky", "mask": "a3756324a692981510c39b1a59510a36"}, ], ) From 9ac586b3c8636d2216cdfd32d00e40c396e23148 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Date: Fri, 7 Oct 2022 18:01:58 -0400 Subject: [PATCH 503/539] Rework pipeline tests (#19366) * Rework pipeline tests * Try to fix Flax tests * Try to put it before * Use a new decorator instead * Remove ignore marker since it doesn't work * Filter pipeline tests * Woopsie * Use the fitlered list * Clean up and fake modif * Remove init * Revert fake modif --- .circleci/config.yml | 52 +++++++++++-------- .github/workflows/self-scheduled.yml | 8 +-- conftest.py | 1 - src/transformers/testing_utils.py | 32 +++++------- .../test_pipelines_audio_classification.py | 10 +--- ..._pipelines_automatic_speech_recognition.py | 2 - tests/pipelines/test_pipelines_common.py | 8 ++- .../test_pipelines_conversational.py | 3 +- ...t_pipelines_document_question_answering.py | 2 - .../test_pipelines_feature_extraction.py | 3 +- tests/pipelines/test_pipelines_fill_mask.py | 10 +--- .../test_pipelines_image_classification.py | 4 +- .../test_pipelines_image_segmentation.py | 11 +--- .../pipelines/test_pipelines_image_to_text.py | 3 +- .../test_pipelines_object_detection.py | 11 +--- .../test_pipelines_question_answering.py | 5 +- .../pipelines/test_pipelines_summarization.py | 3 +- ...test_pipelines_table_question_answering.py | 2 - .../test_pipelines_text2text_generation.py | 3 +- .../test_pipelines_text_classification.py | 3 +- .../test_pipelines_text_generation.py | 4 +- .../test_pipelines_token_classification.py | 11 +--- tests/pipelines/test_pipelines_translation.py | 4 +- ...est_pipelines_visual_question_answering.py | 10 +--- tests/pipelines/test_pipelines_zero_shot.py | 3 +- ...ipelines_zero_shot_image_classification.py | 10 +--- utils/tests_fetcher.py | 26 ++++++++++ 27 files changed, 95 insertions(+), 149 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index aef10586cdc56b..f54a599808830f 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -79,10 +79,19 @@ jobs: path: ~/transformers/tests_fetched_summary.txt - run: | if [ -f test_list.txt ]; then - mv test_list.txt test_preparation/test_list.txt + cp test_list.txt test_preparation/test_list.txt else touch test_preparation/test_list.txt fi + - run: python utils/tests_fetcher.py --filter_pipeline_tests + - run: | + if [ -f test_list.txt ]; then + mv test_list.txt test_preparation/filtered_test_list.txt + else + touch test_preparation/filtered_test_list.txt + fi + - store_artifacts: + path: ~/transformers/test_preparation/filtered_test_list.txt - run: python utils/tests_fetcher.py --filters tests examples | tee examples_tests_fetched_summary.txt - store_artifacts: path: ~/transformers/examples_tests_fetched_summary.txt @@ -97,6 +106,7 @@ jobs: root: test_preparation/ paths: test_list.txt + filtered_test_list.txt examples_test_list.txt # To run all tests for the nightly build @@ -110,6 +120,8 @@ jobs: mkdir test_preparation echo "tests" > test_preparation/test_list.txt echo "tests" > test_preparation/examples_test_list.txt + - run: python utils/tests_fetcher.py --filter_pipeline_tests + - run: mv test_list.txt test_preparation/filtered_test_list.txt - persist_to_workspace: root: test_preparation/ @@ -132,7 +144,7 @@ jobs: - attach_workspace: at: ~/transformers/test_preparation - run: | - if [ ! -s test_preparation/test_list.txt ]; then + if [ ! -s test_preparation/filtered_test_list.txt ]; then echo "No tests to run, exiting early!" circleci-agent step halt fi @@ -152,7 +164,7 @@ jobs: key: v0.5-torch_and_tf-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_tf $(cat test_preparation/test_list.txt) -m is_pt_tf_cross_test --durations=0 | tee tests_output.txt + - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_tf $(cat test_preparation/filtered_test_list.txt) -m is_pt_tf_cross_test --durations=0 | tee tests_output.txt - store_artifacts: path: ~/transformers/tests_output.txt - store_artifacts: @@ -174,7 +186,7 @@ jobs: - attach_workspace: at: ~/transformers/test_preparation - run: | - if [ ! -s test_preparation/test_list.txt ]; then + if [ ! -s test_preparation/filtered_test_list.txt ]; then echo "No tests to run, exiting early!" circleci-agent step halt fi @@ -192,7 +204,7 @@ jobs: key: v0.5-torch_and_flax-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_flax $(cat test_preparation/test_list.txt) -m is_pt_flax_cross_test --durations=0 | tee tests_output.txt + - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_flax $(cat test_preparation/filtered_test_list.txt) -m is_pt_flax_cross_test --durations=0 | tee tests_output.txt - store_artifacts: path: ~/transformers/tests_output.txt - store_artifacts: @@ -213,7 +225,7 @@ jobs: - attach_workspace: at: ~/transformers/test_preparation - run: | - if [ ! -s test_preparation/test_list.txt ]; then + if [ ! -s test_preparation/filtered_test_list.txt ]; then echo "No tests to run, exiting early!" circleci-agent step halt fi @@ -231,7 +243,7 @@ jobs: key: v0.5-torch-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - - run: python -m pytest -n 3 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_torch $(cat test_preparation/test_list.txt) | tee tests_output.txt + - run: python -m pytest -n 3 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_torch $(cat test_preparation/filtered_test_list.txt) | tee tests_output.txt - store_artifacts: path: ~/transformers/tests_output.txt - store_artifacts: @@ -252,7 +264,7 @@ jobs: - attach_workspace: at: ~/transformers/test_preparation - run: | - if [ ! -s test_preparation/test_list.txt ]; then + if [ ! -s test_preparation/filtered_test_list.txt ]; then echo "No tests to run, exiting early!" circleci-agent step halt fi @@ -269,7 +281,7 @@ jobs: key: v0.5-tf-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_tf $(cat test_preparation/test_list.txt) | tee tests_output.txt + - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_tf $(cat test_preparation/filtered_test_list.txt) | tee tests_output.txt - store_artifacts: path: ~/transformers/tests_output.txt - store_artifacts: @@ -290,7 +302,7 @@ jobs: - attach_workspace: at: ~/transformers/test_preparation - run: | - if [ ! -s test_preparation/test_list.txt ]; then + if [ ! -s test_preparation/filtered_test_list.txt ]; then echo "No tests to run, exiting early!" circleci-agent step halt fi @@ -306,7 +318,7 @@ jobs: key: v0.5-flax-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_flax $(cat test_preparation/test_list.txt) | tee tests_output.txt + - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_flax $(cat test_preparation/filtered_test_list.txt) | tee tests_output.txt - store_artifacts: path: ~/transformers/tests_output.txt - store_artifacts: @@ -318,7 +330,6 @@ jobs: - image: cimg/python:3.7.12 environment: OMP_NUM_THREADS: 1 - RUN_PIPELINE_TESTS: yes TRANSFORMERS_IS_CI: yes PYTEST_TIMEOUT: 120 resource_class: xlarge @@ -345,7 +356,7 @@ jobs: key: v0.5-torch-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_torch -m is_pipeline_test $(cat test_preparation/test_list.txt) | tee tests_output.txt + - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_torch tests/pipelines | tee tests_output.txt - store_artifacts: path: ~/transformers/tests_output.txt - store_artifacts: @@ -357,7 +368,6 @@ jobs: - image: cimg/python:3.7.12 environment: OMP_NUM_THREADS: 1 - RUN_PIPELINE_TESTS: yes TRANSFORMERS_IS_CI: yes PYTEST_TIMEOUT: 120 resource_class: xlarge @@ -382,7 +392,7 @@ jobs: key: v0.5-tf-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_tf $(cat test_preparation/test_list.txt) -m is_pipeline_test | tee tests_output.txt + - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_tf tests/pipelines | tee tests_output.txt - store_artifacts: path: ~/transformers/tests_output.txt - store_artifacts: @@ -401,7 +411,7 @@ jobs: - attach_workspace: at: ~/transformers/test_preparation - run: | - if [ ! -s test_preparation/test_list.txt ]; then + if [ ! -s test_preparation/filtered_test_list.txt ]; then echo "No tests to run, exiting early!" circleci-agent step halt fi @@ -557,7 +567,7 @@ jobs: - attach_workspace: at: ~/transformers/test_preparation - run: | - if [ ! -s test_preparation/test_list.txt ]; then + if [ ! -s test_preparation/filtered_test_list.txt ]; then echo "No tests to run, exiting early!" circleci-agent step halt fi @@ -575,7 +585,7 @@ jobs: key: v0.5-hub-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - - run: python -m pytest --max-worker-restart=0 -sv --make-reports=tests_hub $(cat test_preparation/test_list.txt) -m is_staging_test | tee tests_output.txt + - run: python -m pytest --max-worker-restart=0 -sv --make-reports=tests_hub $(cat test_preparation/filtered_test_list.txt) -m is_staging_test | tee tests_output.txt - store_artifacts: path: ~/transformers/tests_output.txt - store_artifacts: @@ -596,7 +606,7 @@ jobs: - attach_workspace: at: ~/transformers/test_preparation - run: | - if [ ! -s test_preparation/test_list.txt ]; then + if [ ! -s test_preparation/filtered_test_list.txt ]; then echo "No tests to run, exiting early!" circleci-agent step halt fi @@ -610,7 +620,7 @@ jobs: key: v0.5-onnx-{{ checksum "setup.py" }} paths: - '~/.cache/pip' - - run: python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_onnx $(cat test_preparation/test_list.txt) -k onnx | tee tests_output.txt + - run: python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_onnx $(cat test_preparation/filtered_test_list.txt) -k onnx | tee tests_output.txt - store_artifacts: path: ~/transformers/tests_output.txt @@ -690,7 +700,7 @@ jobs: steps: - checkout - attach_workspace: - at: ~/transformers/test_preparation + at: ~/transformers/filtered_test_list.txt - run: | if [ ! -s test_preparation/test_list.txt ]; then echo "No tests to run, exiting early!" diff --git a/.github/workflows/self-scheduled.yml b/.github/workflows/self-scheduled.yml index 7de69a573e3852..3b91eac50d536e 100644 --- a/.github/workflows/self-scheduled.yml +++ b/.github/workflows/self-scheduled.yml @@ -256,10 +256,8 @@ jobs: - name: Run all pipeline tests on GPU working-directory: /transformers - env: - RUN_PIPELINE_TESTS: yes run: | - python3 -m pytest -n 1 -v --dist=loadfile -m is_pipeline_test --make-reports=${{ matrix.machine_type }}_tests_torch_pipeline_gpu tests + python3 -m pytest -n 1 -v --dist=loadfile --make-reports=${{ matrix.machine_type }}_tests_torch_pipeline_gpu tests/pipelines - name: Failure short reports if: ${{ failure() }} @@ -301,10 +299,8 @@ jobs: - name: Run all pipeline tests on GPU working-directory: /transformers - env: - RUN_PIPELINE_TESTS: yes run: | - python3 -m pytest -n 1 -v --dist=loadfile -m is_pipeline_test --make-reports=${{ matrix.machine_type }}_tests_tf_pipeline_gpu tests + python3 -m pytest -n 1 -v --dist=loadfile --make-reports=${{ matrix.machine_type }}_tests_tf_pipeline_gpu tests/pipelines - name: Failure short reports if: ${{ always() }} diff --git a/conftest.py b/conftest.py index e71ada998a6df9..c3d4f70326d90c 100644 --- a/conftest.py +++ b/conftest.py @@ -32,7 +32,6 @@ def pytest_configure(config): - config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipeline are tested") config.addinivalue_line( "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" ) diff --git a/src/transformers/testing_utils.py b/src/transformers/testing_utils.py index 08409b6e092d33..5884e642d9b779 100644 --- a/src/transformers/testing_utils.py +++ b/src/transformers/testing_utils.py @@ -133,7 +133,6 @@ def parse_int_from_env(key, default=None): _run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=False) _run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False) _run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False) -_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=False) _run_git_lfs_tests = parse_flag_from_env("RUN_GIT_LFS_TESTS", default=False) _tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None) @@ -176,25 +175,6 @@ def is_pt_flax_cross_test(test_case): return pytest.mark.is_pt_flax_cross_test()(test_case) -def is_pipeline_test(test_case): - """ - Decorator marking a test as a pipeline test. - - Pipeline tests are skipped by default and we can run only them by setting RUN_PIPELINE_TESTS environment variable - to a truthy value and selecting the is_pipeline_test pytest mark. - - """ - if not _run_pipeline_tests: - return unittest.skip("test is pipeline test")(test_case) - else: - try: - import pytest # We don't need a hard dependency on pytest in the main library - except ImportError: - return test_case - else: - return pytest.mark.is_pipeline_test()(test_case) - - def is_staging_test(test_case): """ Decorator marking a test as a staging test. @@ -309,6 +289,18 @@ def require_torch(test_case): return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case) +def require_torch_or_tf(test_case): + """ + Decorator marking a test that requires PyTorch or TensorFlow. + + These tests are skipped when neither PyTorch not TensorFlow is installed. + + """ + return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")( + test_case + ) + + def require_intel_extension_for_pytorch(test_case): """ Decorator marking a test that requires Intel Extension for PyTorch. diff --git a/tests/pipelines/test_pipelines_audio_classification.py b/tests/pipelines/test_pipelines_audio_classification.py index df32cf58d3502b..3f957132fd825d 100644 --- a/tests/pipelines/test_pipelines_audio_classification.py +++ b/tests/pipelines/test_pipelines_audio_classification.py @@ -18,19 +18,11 @@ from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING from transformers.pipelines import AudioClassificationPipeline, pipeline -from transformers.testing_utils import ( - is_pipeline_test, - nested_simplify, - require_tf, - require_torch, - require_torchaudio, - slow, -) +from transformers.testing_utils import nested_simplify, require_tf, require_torch, require_torchaudio, slow from .test_pipelines_common import ANY, PipelineTestCaseMeta -@is_pipeline_test @require_torch class AudioClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING diff --git a/tests/pipelines/test_pipelines_automatic_speech_recognition.py b/tests/pipelines/test_pipelines_automatic_speech_recognition.py index 0523639cc4fe85..d4fcbf5f78146c 100644 --- a/tests/pipelines/test_pipelines_automatic_speech_recognition.py +++ b/tests/pipelines/test_pipelines_automatic_speech_recognition.py @@ -31,7 +31,6 @@ from transformers.pipelines.audio_utils import chunk_bytes_iter from transformers.pipelines.automatic_speech_recognition import chunk_iter from transformers.testing_utils import ( - is_pipeline_test, is_torch_available, nested_simplify, require_pyctcdecode, @@ -52,7 +51,6 @@ # from .test_pipelines_common import CustomInputPipelineCommonMixin -@is_pipeline_test class AutomaticSpeechRecognitionPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): model_mapping = { k: v diff --git a/tests/pipelines/test_pipelines_common.py b/tests/pipelines/test_pipelines_common.py index 34684186b54d4e..0f03a42440d79c 100644 --- a/tests/pipelines/test_pipelines_common.py +++ b/tests/pipelines/test_pipelines_common.py @@ -48,13 +48,13 @@ USER, CaptureLogger, RequestCounter, - is_pipeline_test, is_staging_test, nested_simplify, require_scatter, require_tensorflow_probability, require_tf, require_torch, + require_torch_or_tf, slow, ) from transformers.utils import is_tf_available, is_torch_available @@ -307,7 +307,6 @@ def inner(self): return type.__new__(mcs, name, bases, dct) -@is_pipeline_test class CommonPipelineTest(unittest.TestCase): @require_torch def test_pipeline_iteration(self): @@ -416,7 +415,6 @@ def test_unbatch_attentions_hidden_states(self): self.assertEqual(len(outputs), 20) -@is_pipeline_test class PipelinePadTest(unittest.TestCase): @require_torch def test_pipeline_padding(self): @@ -498,7 +496,6 @@ def test_pipeline_offset_mapping(self): ) -@is_pipeline_test class PipelineUtilsTest(unittest.TestCase): @require_torch def test_pipeline_dataset(self): @@ -795,7 +792,6 @@ def postprocess(self, model_outputs): return model_outputs["logits"].softmax(-1).numpy() -@is_pipeline_test class CustomPipelineTest(unittest.TestCase): def test_warning_logs(self): transformers_logging.set_verbosity_debug() @@ -835,6 +831,7 @@ def test_register_pipeline(self): # Clean registry for next tests. del PIPELINE_REGISTRY.supported_tasks["custom-text-classification"] + @require_torch_or_tf def test_dynamic_pipeline(self): PIPELINE_REGISTRY.register_pipeline( "pair-classification", @@ -886,6 +883,7 @@ def test_dynamic_pipeline(self): [{"label": "LABEL_0", "score": 0.505}], ) + @require_torch_or_tf def test_cached_pipeline_has_minimum_calls_to_head(self): # Make sure we have cached the pipeline. _ = pipeline("text-classification", model="hf-internal-testing/tiny-random-bert") diff --git a/tests/pipelines/test_pipelines_conversational.py b/tests/pipelines/test_pipelines_conversational.py index 342a09e2e697a4..39ad2175dcff56 100644 --- a/tests/pipelines/test_pipelines_conversational.py +++ b/tests/pipelines/test_pipelines_conversational.py @@ -29,7 +29,7 @@ TFAutoModelForCausalLM, pipeline, ) -from transformers.testing_utils import is_pipeline_test, require_tf, require_torch, slow, torch_device +from transformers.testing_utils import require_tf, require_torch, slow, torch_device from .test_pipelines_common import ANY, PipelineTestCaseMeta @@ -37,7 +37,6 @@ DEFAULT_DEVICE_NUM = -1 if torch_device == "cpu" else 0 -@is_pipeline_test class ConversationalPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): model_mapping = dict( list(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.items()) diff --git a/tests/pipelines/test_pipelines_document_question_answering.py b/tests/pipelines/test_pipelines_document_question_answering.py index 92d618bfd67104..bea8335c5c8e57 100644 --- a/tests/pipelines/test_pipelines_document_question_answering.py +++ b/tests/pipelines/test_pipelines_document_question_answering.py @@ -18,7 +18,6 @@ from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( - is_pipeline_test, nested_simplify, require_detectron2, require_pytesseract, @@ -53,7 +52,6 @@ def load_image(_): ) -@is_pipeline_test @require_torch @require_vision class DocumentQuestionAnsweringPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): diff --git a/tests/pipelines/test_pipelines_feature_extraction.py b/tests/pipelines/test_pipelines_feature_extraction.py index 42cdb79bb666b9..7fab49dd60423d 100644 --- a/tests/pipelines/test_pipelines_feature_extraction.py +++ b/tests/pipelines/test_pipelines_feature_extraction.py @@ -22,12 +22,11 @@ LxmertConfig, pipeline, ) -from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch +from transformers.testing_utils import nested_simplify, require_tf, require_torch from .test_pipelines_common import PipelineTestCaseMeta -@is_pipeline_test class FeatureExtractionPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): model_mapping = MODEL_MAPPING tf_model_mapping = TF_MODEL_MAPPING diff --git a/tests/pipelines/test_pipelines_fill_mask.py b/tests/pipelines/test_pipelines_fill_mask.py index d85ab8d7ce32a6..760c475524f171 100644 --- a/tests/pipelines/test_pipelines_fill_mask.py +++ b/tests/pipelines/test_pipelines_fill_mask.py @@ -16,19 +16,11 @@ from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException -from transformers.testing_utils import ( - is_pipeline_test, - nested_simplify, - require_tf, - require_torch, - require_torch_gpu, - slow, -) +from transformers.testing_utils import nested_simplify, require_tf, require_torch, require_torch_gpu, slow from .test_pipelines_common import ANY, PipelineTestCaseMeta -@is_pipeline_test class FillMaskPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): model_mapping = MODEL_FOR_MASKED_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING diff --git a/tests/pipelines/test_pipelines_image_classification.py b/tests/pipelines/test_pipelines_image_classification.py index 8e19d60e80fb70..59a58ff44d581e 100644 --- a/tests/pipelines/test_pipelines_image_classification.py +++ b/tests/pipelines/test_pipelines_image_classification.py @@ -22,10 +22,10 @@ ) from transformers.pipelines import ImageClassificationPipeline, pipeline from transformers.testing_utils import ( - is_pipeline_test, nested_simplify, require_tf, require_torch, + require_torch_or_tf, require_vision, slow, ) @@ -43,7 +43,7 @@ def open(*args, **kwargs): pass -@is_pipeline_test +@require_torch_or_tf @require_vision class ImageClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): model_mapping = MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING diff --git a/tests/pipelines/test_pipelines_image_segmentation.py b/tests/pipelines/test_pipelines_image_segmentation.py index 65656939d0cce4..3d7d067afa797f 100644 --- a/tests/pipelines/test_pipelines_image_segmentation.py +++ b/tests/pipelines/test_pipelines_image_segmentation.py @@ -31,15 +31,7 @@ is_vision_available, pipeline, ) -from transformers.testing_utils import ( - is_pipeline_test, - nested_simplify, - require_tf, - require_timm, - require_torch, - require_vision, - slow, -) +from transformers.testing_utils import nested_simplify, require_tf, require_timm, require_torch, require_vision, slow from .test_pipelines_common import ANY, PipelineTestCaseMeta @@ -62,7 +54,6 @@ def hashimage(image: Image) -> str: @require_vision @require_timm @require_torch -@is_pipeline_test class ImageSegmentationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): model_mapping = { k: v diff --git a/tests/pipelines/test_pipelines_image_to_text.py b/tests/pipelines/test_pipelines_image_to_text.py index 897c3b2e47250d..325030330030ef 100644 --- a/tests/pipelines/test_pipelines_image_to_text.py +++ b/tests/pipelines/test_pipelines_image_to_text.py @@ -16,7 +16,7 @@ from transformers import MODEL_FOR_VISION_2_SEQ_MAPPING, TF_MODEL_FOR_VISION_2_SEQ_MAPPING, is_vision_available from transformers.pipelines import pipeline -from transformers.testing_utils import is_pipeline_test, require_tf, require_torch, require_vision, slow +from transformers.testing_utils import require_tf, require_torch, require_vision, slow from .test_pipelines_common import ANY, PipelineTestCaseMeta @@ -31,7 +31,6 @@ def open(*args, **kwargs): pass -@is_pipeline_test @require_vision class ImageToTextPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): model_mapping = MODEL_FOR_VISION_2_SEQ_MAPPING diff --git a/tests/pipelines/test_pipelines_object_detection.py b/tests/pipelines/test_pipelines_object_detection.py index b1d43f8a8795c6..ebefcaab61f6ee 100644 --- a/tests/pipelines/test_pipelines_object_detection.py +++ b/tests/pipelines/test_pipelines_object_detection.py @@ -22,15 +22,7 @@ is_vision_available, pipeline, ) -from transformers.testing_utils import ( - is_pipeline_test, - nested_simplify, - require_tf, - require_timm, - require_torch, - require_vision, - slow, -) +from transformers.testing_utils import nested_simplify, require_tf, require_timm, require_torch, require_vision, slow from .test_pipelines_common import ANY, PipelineTestCaseMeta @@ -48,7 +40,6 @@ def open(*args, **kwargs): @require_vision @require_timm @require_torch -@is_pipeline_test class ObjectDetectionPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING diff --git a/tests/pipelines/test_pipelines_question_answering.py b/tests/pipelines/test_pipelines_question_answering.py index 001254aa94b01e..fb0f6cc8817022 100644 --- a/tests/pipelines/test_pipelines_question_answering.py +++ b/tests/pipelines/test_pipelines_question_answering.py @@ -22,12 +22,11 @@ ) from transformers.data.processors.squad import SquadExample from transformers.pipelines import QuestionAnsweringArgumentHandler, pipeline -from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow +from transformers.testing_utils import nested_simplify, require_tf, require_torch, require_torch_or_tf, slow from .test_pipelines_common import ANY, PipelineTestCaseMeta -@is_pipeline_test class QAPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): model_mapping = MODEL_FOR_QUESTION_ANSWERING_MAPPING tf_model_mapping = TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING @@ -345,7 +344,7 @@ def test_large_model_tf(self): self.assertEqual(nested_simplify(outputs), {"score": 0.979, "start": 27, "end": 32, "answer": "Paris"}) -@is_pipeline_test +@require_torch_or_tf class QuestionAnsweringArgumentHandlerTests(unittest.TestCase): def test_argument_handler(self): qa = QuestionAnsweringArgumentHandler() diff --git a/tests/pipelines/test_pipelines_summarization.py b/tests/pipelines/test_pipelines_summarization.py index d797383811c6ae..50e8315a5f1e0b 100644 --- a/tests/pipelines/test_pipelines_summarization.py +++ b/tests/pipelines/test_pipelines_summarization.py @@ -23,7 +23,7 @@ T5Config, pipeline, ) -from transformers.testing_utils import is_pipeline_test, require_tf, require_torch, slow, torch_device +from transformers.testing_utils import require_tf, require_torch, slow, torch_device from transformers.tokenization_utils import TruncationStrategy from .test_pipelines_common import ANY, PipelineTestCaseMeta @@ -32,7 +32,6 @@ DEFAULT_DEVICE_NUM = -1 if torch_device == "cpu" else 0 -@is_pipeline_test class SummarizationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING diff --git a/tests/pipelines/test_pipelines_table_question_answering.py b/tests/pipelines/test_pipelines_table_question_answering.py index ba7fdaa75c5017..089186a4672c1f 100644 --- a/tests/pipelines/test_pipelines_table_question_answering.py +++ b/tests/pipelines/test_pipelines_table_question_answering.py @@ -23,7 +23,6 @@ pipeline, ) from transformers.testing_utils import ( - is_pipeline_test, require_pandas, require_tensorflow_probability, require_tf, @@ -35,7 +34,6 @@ from .test_pipelines_common import PipelineTestCaseMeta -@is_pipeline_test class TQAPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): # Putting it there for consistency, but TQA do not have fast tokenizer # which are needed to generate automatic tests diff --git a/tests/pipelines/test_pipelines_text2text_generation.py b/tests/pipelines/test_pipelines_text2text_generation.py index 4490c5716220db..772190fb63e462 100644 --- a/tests/pipelines/test_pipelines_text2text_generation.py +++ b/tests/pipelines/test_pipelines_text2text_generation.py @@ -20,7 +20,7 @@ Text2TextGenerationPipeline, pipeline, ) -from transformers.testing_utils import is_pipeline_test, require_tf, require_torch +from transformers.testing_utils import require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY, PipelineTestCaseMeta @@ -30,7 +30,6 @@ import torch -@is_pipeline_test class Text2TextGenerationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING diff --git a/tests/pipelines/test_pipelines_text_classification.py b/tests/pipelines/test_pipelines_text_classification.py index 6bbc84989a211d..80e8e2559f171b 100644 --- a/tests/pipelines/test_pipelines_text_classification.py +++ b/tests/pipelines/test_pipelines_text_classification.py @@ -20,12 +20,11 @@ TextClassificationPipeline, pipeline, ) -from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow +from transformers.testing_utils import nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY, PipelineTestCaseMeta -@is_pipeline_test class TextClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING diff --git a/tests/pipelines/test_pipelines_text_generation.py b/tests/pipelines/test_pipelines_text_generation.py index ac6d122559ee5d..ca0e1011584574 100644 --- a/tests/pipelines/test_pipelines_text_generation.py +++ b/tests/pipelines/test_pipelines_text_generation.py @@ -16,17 +16,17 @@ from transformers import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, pipeline from transformers.testing_utils import ( - is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, + require_torch_or_tf, ) from .test_pipelines_common import ANY, PipelineTestCaseMeta -@is_pipeline_test +@require_torch_or_tf class TextGenerationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING diff --git a/tests/pipelines/test_pipelines_token_classification.py b/tests/pipelines/test_pipelines_token_classification.py index bc4eaef06255e3..ff86e7106ae5bb 100644 --- a/tests/pipelines/test_pipelines_token_classification.py +++ b/tests/pipelines/test_pipelines_token_classification.py @@ -25,14 +25,7 @@ pipeline, ) from transformers.pipelines import AggregationStrategy, TokenClassificationArgumentHandler -from transformers.testing_utils import ( - is_pipeline_test, - nested_simplify, - require_tf, - require_torch, - require_torch_gpu, - slow, -) +from transformers.testing_utils import nested_simplify, require_tf, require_torch, require_torch_gpu, slow from .test_pipelines_common import ANY, PipelineTestCaseMeta @@ -40,7 +33,6 @@ VALID_INPUTS = ["A simple string", ["list of strings", "A simple string that is quite a bit longer"]] -@is_pipeline_test class TokenClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): model_mapping = MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING @@ -770,7 +762,6 @@ def test_simple(self): ) -@is_pipeline_test class TokenClassificationArgumentHandlerTestCase(unittest.TestCase): def setUp(self): self.args_parser = TokenClassificationArgumentHandler() diff --git a/tests/pipelines/test_pipelines_translation.py b/tests/pipelines/test_pipelines_translation.py index 3c5999f36e60dc..d8de606f698d94 100644 --- a/tests/pipelines/test_pipelines_translation.py +++ b/tests/pipelines/test_pipelines_translation.py @@ -25,12 +25,11 @@ TranslationPipeline, pipeline, ) -from transformers.testing_utils import is_pipeline_test, require_tf, require_torch, slow +from transformers.testing_utils import require_tf, require_torch, slow from .test_pipelines_common import ANY, PipelineTestCaseMeta -@is_pipeline_test class TranslationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING @@ -118,7 +117,6 @@ def test_en_to_de_tf(self): ) -@is_pipeline_test class TranslationNewFormatPipelineTests(unittest.TestCase): @require_torch @slow diff --git a/tests/pipelines/test_pipelines_visual_question_answering.py b/tests/pipelines/test_pipelines_visual_question_answering.py index d3315681f47ebb..bf3a532b10cb66 100644 --- a/tests/pipelines/test_pipelines_visual_question_answering.py +++ b/tests/pipelines/test_pipelines_visual_question_answering.py @@ -16,14 +16,7 @@ from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline -from transformers.testing_utils import ( - is_pipeline_test, - nested_simplify, - require_tf, - require_torch, - require_vision, - slow, -) +from transformers.testing_utils import nested_simplify, require_tf, require_torch, require_vision, slow from .test_pipelines_common import ANY, PipelineTestCaseMeta @@ -38,7 +31,6 @@ def open(*args, **kwargs): pass -@is_pipeline_test @require_torch @require_vision class VisualQuestionAnsweringPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): diff --git a/tests/pipelines/test_pipelines_zero_shot.py b/tests/pipelines/test_pipelines_zero_shot.py index af98ac02017205..5c78db1aa96172 100644 --- a/tests/pipelines/test_pipelines_zero_shot.py +++ b/tests/pipelines/test_pipelines_zero_shot.py @@ -21,12 +21,11 @@ ZeroShotClassificationPipeline, pipeline, ) -from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow +from transformers.testing_utils import nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY, PipelineTestCaseMeta -@is_pipeline_test class ZeroShotClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING diff --git a/tests/pipelines/test_pipelines_zero_shot_image_classification.py b/tests/pipelines/test_pipelines_zero_shot_image_classification.py index a5aef5c35bd06d..d0396f4e9a9998 100644 --- a/tests/pipelines/test_pipelines_zero_shot_image_classification.py +++ b/tests/pipelines/test_pipelines_zero_shot_image_classification.py @@ -16,14 +16,7 @@ from transformers import is_vision_available from transformers.pipelines import pipeline -from transformers.testing_utils import ( - is_pipeline_test, - nested_simplify, - require_tf, - require_torch, - require_vision, - slow, -) +from transformers.testing_utils import nested_simplify, require_tf, require_torch, require_vision, slow from .test_pipelines_common import ANY, PipelineTestCaseMeta @@ -39,7 +32,6 @@ def open(*args, **kwargs): @require_vision -@is_pipeline_test class ZeroShotImageClassificationPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): # Deactivating auto tests since we don't have a good MODEL_FOR_XX mapping, # and only CLIP would be there for now. diff --git a/utils/tests_fetcher.py b/utils/tests_fetcher.py index 0af1a8ad8eb735..f9e0f86af5bc91 100644 --- a/utils/tests_fetcher.py +++ b/utils/tests_fetcher.py @@ -619,6 +619,25 @@ def infer_tests_to_run(output_file, diff_with_last_commit=False, filters=None, j json.dump(test_map, fp, ensure_ascii=False) +def filter_pipeline_tests(output_file): + if not os.path.isfile(output_file): + print("No test file found.") + return + with open(output_file, "r", encoding="utf-8") as f: + test_files = f.read().split(" ") + + if len(test_files) == 0: + print("No tests to filter.") + return + if test_files == ["tests"]: + test_files = [os.path.join("tests", f) for f in os.listdir("tests") if f not in ["__init__.py", "pipelines"]] + else: + test_files = [f for f in test_files if not f.startswith(os.path.join("tests", "pipelines"))] + + with open(output_file, "w", encoding="utf-8") as f: + f.write(" ".join(test_files)) + + if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( @@ -645,6 +664,11 @@ def infer_tests_to_run(output_file, diff_with_last_commit=False, filters=None, j default=["tests"], help="Only keep the test files matching one of those filters.", ) + parser.add_argument( + "--filter_pipeline_tests", + action="store_true", + help="Will filter the pipeline tests outside of the generated list of tests.", + ) parser.add_argument( "--print_dependencies_of", type=str, @@ -656,6 +680,8 @@ def infer_tests_to_run(output_file, diff_with_last_commit=False, filters=None, j print_tree_deps_of(args.print_dependencies_of) elif args.sanity_check: sanity_check() + elif args.filter_pipeline_tests: + filter_pipeline_tests(args.output_file) else: repo = Repo(PATH_TO_TRANFORMERS) From d92e22d1f28324f513f3080e5c47c071a3916721 Mon Sep 17 00:00:00 2001 From: Sylvain Gugger Date: Fri, 7 Oct 2022 21:38:07 -0400 Subject: [PATCH 504/539] Remove ref to is_pipeline_test --- .../test_pipelines_zero_shot_object_detection.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/tests/pipelines/test_pipelines_zero_shot_object_detection.py b/tests/pipelines/test_pipelines_zero_shot_object_detection.py index 10b7e799cc52bd..aef64e7db27b0f 100644 --- a/tests/pipelines/test_pipelines_zero_shot_object_detection.py +++ b/tests/pipelines/test_pipelines_zero_shot_object_detection.py @@ -15,14 +15,7 @@ import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline -from transformers.testing_utils import ( - is_pipeline_test, - nested_simplify, - require_tf, - require_torch, - require_vision, - slow, -) +from transformers.testing_utils import nested_simplify, require_tf, require_torch, require_vision, slow from .test_pipelines_common import ANY, PipelineTestCaseMeta @@ -39,7 +32,6 @@ def open(*args, **kwargs): @require_vision @require_torch -@is_pipeline_test class ZeroShotObjectDetectionPipelineTests(unittest.TestCase, metaclass=PipelineTestCaseMeta): model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING From 8b6bba54a707a718149f11c2b0631e6ccd715fba Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 10 Oct 2022 08:51:30 +0100 Subject: [PATCH 505/539] Fix `ViTMSNForImageClassification` doctest (#19275) Co-authored-by: ydshieh --- src/transformers/models/vit_msn/modeling_vit_msn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/models/vit_msn/modeling_vit_msn.py b/src/transformers/models/vit_msn/modeling_vit_msn.py index f40d5278c06be1..53969ef930af98 100644 --- a/src/transformers/models/vit_msn/modeling_vit_msn.py +++ b/src/transformers/models/vit_msn/modeling_vit_msn.py @@ -632,7 +632,7 @@ def forward( >>> from PIL import Image >>> import requests - >>> torch.manual_seed(2) + >>> torch.manual_seed(2) # doctest: +IGNORE_RESULT >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) From cbb8a37929c3860210f95c9ec99b8b84b8cf57a1 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 10 Oct 2022 09:05:30 +0100 Subject: [PATCH 506/539] Skip `BloomEmbeddingTest.test_embeddings` for PyTorch < 1.10 (#19261) Co-authored-by: ydshieh --- tests/models/bloom/test_modeling_bloom.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/models/bloom/test_modeling_bloom.py b/tests/models/bloom/test_modeling_bloom.py index 06cec20456f533..9858a390faf3ba 100644 --- a/tests/models/bloom/test_modeling_bloom.py +++ b/tests/models/bloom/test_modeling_bloom.py @@ -37,6 +37,7 @@ BloomModel, BloomTokenizerFast, ) + from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_10 @require_torch @@ -500,9 +501,14 @@ def setUp(self): super().setUp() self.path_bigscience_model = "bigscience/bigscience-small-testing" + @unittest.skipIf( + not is_torch_available() or not is_torch_greater_or_equal_than_1_10, + "Test failed with torch < 1.10 (`LayerNormKernelImpl` not implemented for `BFloat16`)", + ) @require_torch def test_embeddings(self): - model = BloomForCausalLM.from_pretrained(self.path_bigscience_model, torch_dtype="auto") # load in fp32 + # The config in this checkpoint has `bfloat16` as `torch_dtype` -> model in `bfloat16` + model = BloomForCausalLM.from_pretrained(self.path_bigscience_model, torch_dtype="auto") model.eval() EMBEDDINGS_DS_BEFORE_LN_BF_16_MEAN = { From 4107445a0ffbb5a08587307af3980117341311c1 Mon Sep 17 00:00:00 2001 From: Matt Date: Mon, 10 Oct 2022 13:20:00 +0100 Subject: [PATCH 507/539] Fix repo names for ESM tests (#19451) --- tests/models/esm/test_modeling_esm.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/models/esm/test_modeling_esm.py b/tests/models/esm/test_modeling_esm.py index 7bd0a36c8b0cb8..dce9cb69e804a2 100644 --- a/tests/models/esm/test_modeling_esm.py +++ b/tests/models/esm/test_modeling_esm.py @@ -245,7 +245,7 @@ def test_create_position_ids_from_inputs_embeds(self): class EsmModelIntegrationTest(TestCasePlus): @slow def test_inference_masked_lm(self): - model = EsmForMaskedLM.from_pretrained("Rocketknight1/esm-2-8m") + model = EsmForMaskedLM.from_pretrained("Rocketknight1/esm2_t6_8M_UR50D") input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] @@ -261,7 +261,7 @@ def test_inference_masked_lm(self): @slow def test_inference_no_head(self): - model = EsmModel.from_pretrained("Rocketknight1/esm-2-8m") + model = EsmModel.from_pretrained("Rocketknight1/esm2_t6_8M_UR50D") input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]]) output = model(input_ids)[0] @@ -276,7 +276,7 @@ def test_lm_head_ignore_keys(self): keys_to_ignore_on_save_tied = [r"lm_head.decoder.weight", r"lm_head.decoder.bias"] keys_to_ignore_on_save_untied = [r"lm_head.decoder.bias"] - config = EsmConfig.from_pretrained("Rocketknight1/esm-2-8m") + config = EsmConfig.from_pretrained("Rocketknight1/esm2_t6_8M_UR50D") config_tied = deepcopy(config) config_tied.tie_word_embeddings = True config_untied = deepcopy(config) From 1241a4993bd9aa46f6be9ca37c6c8771bca9aea7 Mon Sep 17 00:00:00 2001 From: Druhin Abrol Date: Mon, 10 Oct 2022 18:14:59 +0530 Subject: [PATCH 508/539] remove RobertaConfig inheritance from MarkupLMConfig (#19404) * remove RobertaConfig inheritance from MarkupLMConfig * Update src/transformers/models/markuplm/configuration_markuplm.py fixed typo in docstring Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> --- .../models/markuplm/configuration_markuplm.py | 39 ++++++++++++------- 1 file changed, 25 insertions(+), 14 deletions(-) diff --git a/src/transformers/models/markuplm/configuration_markuplm.py b/src/transformers/models/markuplm/configuration_markuplm.py index a7676d7db4bba2..db434ef6eb7ae1 100644 --- a/src/transformers/models/markuplm/configuration_markuplm.py +++ b/src/transformers/models/markuplm/configuration_markuplm.py @@ -14,9 +14,10 @@ # limitations under the License. """ MarkupLM model configuration""" -from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.utils import logging +from ...configuration_utils import PretrainedConfig + logger = logging.get_logger(__name__) @@ -26,7 +27,7 @@ } -class MarkupLMConfig(RobertaConfig): +class MarkupLMConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MarkupLMModel`]. It is used to instantiate a MarkupLM model according to the specified arguments, defining the model architecture. Instantiating a configuration @@ -116,6 +117,8 @@ def __init__( initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, + bos_token_id=0, + eos_token_id=2, gradient_checkpointing=False, max_xpath_tag_unit_embeddings=256, max_xpath_subs_unit_embeddings=1024, @@ -123,25 +126,33 @@ def __init__( subs_pad_id=1001, xpath_unit_hidden_size=32, max_depth=50, + position_embedding_type="absolute", + use_cache=True, + classifier_dropout=None, **kwargs ): super().__init__( - vocab_size=vocab_size, - hidden_size=hidden_size, - num_hidden_layers=num_hidden_layers, - num_attention_heads=num_attention_heads, - intermediate_size=intermediate_size, - hidden_act=hidden_act, - hidden_dropout_prob=hidden_dropout_prob, - attention_probs_dropout_prob=attention_probs_dropout_prob, - max_position_embeddings=max_position_embeddings, - type_vocab_size=type_vocab_size, - initializer_range=initializer_range, - layer_norm_eps=layer_norm_eps, pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, gradient_checkpointing=gradient_checkpointing, **kwargs, ) + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.hidden_act = hidden_act + self.intermediate_size = intermediate_size + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.position_embedding_type = position_embedding_type + self.use_cache = use_cache + self.classifier_dropout = classifier_dropout # additional properties self.max_depth = max_depth self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings From 83dc49b69b6a6d904d4dd6c90534ea37351af439 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dar=C3=ADo=20Here=C3=B1=C3=BA?= Date: Mon, 10 Oct 2022 09:47:14 -0300 Subject: [PATCH 509/539] Backtick fixed (paragraph 68) (#19440) --- docs/source/es/pipeline_tutorial.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/es/pipeline_tutorial.mdx b/docs/source/es/pipeline_tutorial.mdx index 4e3c70748ed9df..ff668774e2a352 100644 --- a/docs/source/es/pipeline_tutorial.mdx +++ b/docs/source/es/pipeline_tutorial.mdx @@ -65,7 +65,7 @@ Cualquier parámetro adicional para tu tarea también se puede incluir en el [`p ### Selecciona un modelo y un tokenizador -El [`pipeline`] acepta cualquier modelo del [Model Hub](https://huggingface.co/models). Hay etiquetas en el Model Hub que te permiten filtrar por el modelo que te gustaría utilizar para tu tarea. Una vez que hayas elegido un modelo apropiado, cárgalo con la clase `AutoModelFor` y [`AutoTokenizer'] correspondientes. Por ejemplo, carga la clase [`AutoModelForCausalLM`] para una tarea de modelado de lenguaje causal: +El [`pipeline`] acepta cualquier modelo del [Model Hub](https://huggingface.co/models). Hay etiquetas en el Model Hub que te permiten filtrar por el modelo que te gustaría utilizar para tu tarea. Una vez que hayas elegido un modelo apropiado, cárgalo con la clase `AutoModelFor` y [`AutoTokenizer`] correspondientes. Por ejemplo, carga la clase [`AutoModelForCausalLM`] para una tarea de modelado de lenguaje causal: ```py >>> from transformers import AutoTokenizer, AutoModelForCausalLM From 341070573019b594ed5186cbc5309f5226ead9f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dar=C3=ADo=20Here=C3=B1=C3=BA?= Date: Mon, 10 Oct 2022 10:08:34 -0300 Subject: [PATCH 510/539] Fixed duplicated line (paragraph #83) Documentation: @sgugger (#19436) * Fixed duplicated line (paragraph #83) @omarespejel @sgugger * Datasets map denomination fixed (paragraph 42) --- docs/source/es/training.mdx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/es/training.mdx b/docs/source/es/training.mdx index eefe96f9e80d8d..0679f0444ee930 100644 --- a/docs/source/es/training.mdx +++ b/docs/source/es/training.mdx @@ -39,7 +39,7 @@ Comienza cargando el dataset de [Yelp Reviews](https://huggingface.co/datasets/y 'text': 'My expectations for McDonalds are t rarely high. But for one to still fail so spectacularly...that takes something special!\\nThe cashier took my friends\'s order, then promptly ignored me. I had to force myself in front of a cashier who opened his register to wait on the person BEHIND me. I waited over five minutes for a gigantic order that included precisely one kid\'s meal. After watching two people who ordered after me be handed their food, I asked where mine was. The manager started yelling at the cashiers for \\"serving off their orders\\" when they didn\'t have their food. But neither cashier was anywhere near those controls, and the manager was the one serving food to customers and clearing the boards.\\nThe manager was rude when giving me my order. She didn\'t make sure that I had everything ON MY RECEIPT, and never even had the decency to apologize that I felt I was getting poor service.\\nI\'ve eaten at various McDonalds restaurants for over 30 years. I\'ve worked at more than one location. I expect bad days, bad moods, and the occasional mistake. But I have yet to have a decent experience at this store. It will remain a place I avoid unless someone in my party needs to avoid illness from low blood sugar. Perhaps I should go back to the racially biased service of Steak n Shake instead!'} ``` -Como ya sabes, necesitas un tokenizador para procesar el texto e incluir una estrategia para el padding y el truncamiento para manejar cualquier longitud de secuencia variable. Para procesar tu dataset en un solo paso, utiliza el método de 🤗 Datasets mappara aplicar una función de preprocesamiento sobre todo el dataset: +Como ya sabes, necesitas un tokenizador para procesar el texto e incluir una estrategia para el padding y el truncamiento para manejar cualquier longitud de secuencia variable. Para procesar tu dataset en un solo paso, utiliza el método de 🤗 Datasets map para aplicar una función de preprocesamiento sobre todo el dataset: ```py >>> from transformers import AutoTokenizer @@ -80,7 +80,7 @@ Comienza cargando tu modelo y especifica el número de labels previstas. A parti Verás una advertencia acerca de que algunos de los pesos pre-entrenados no están siendo utilizados y que algunos pesos están siendo inicializados al azar. No te preocupes, esto es completamente normal. -No te preocupes, esto es completamente normal. El head/cabezal pre-entrenado del modelo BERT se descarta y se sustituye por un head de clasificación inicializado aleatoriamente. Puedes aplicar fine-tuning a este nuevo head del modelo en tu tarea de clasificación de secuencias haciendo transfer learning del modelo pre-entrenado. +El head/cabezal pre-entrenado del modelo BERT se descarta y se sustituye por un head de clasificación inicializado aleatoriamente. Puedes aplicar fine-tuning a este nuevo head del modelo en tu tarea de clasificación de secuencias haciendo transfer learning del modelo pre-entrenado. From c523a86929d26dfa97fad1f50a00f3e5e7f22e1b Mon Sep 17 00:00:00 2001 From: Rak Alexey Date: Mon, 10 Oct 2022 16:11:29 +0300 Subject: [PATCH 511/539] fix marianMT convertion to onnx (#19287) * fix marianMT convertion to onnx * Update src/transformers/onnx/convert.py Co-authored-by: lewtun * Update src/transformers/onnx/convert.py Co-authored-by: lewtun Co-authored-by: lewtun --- src/transformers/models/marian/configuration_marian.py | 4 ++++ src/transformers/onnx/convert.py | 4 +++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/transformers/models/marian/configuration_marian.py b/src/transformers/models/marian/configuration_marian.py index e2db7b526561d5..96f39b5b4e1ebf 100644 --- a/src/transformers/models/marian/configuration_marian.py +++ b/src/transformers/models/marian/configuration_marian.py @@ -392,3 +392,7 @@ def _flatten_past_key_values_(self, flattened_output, name, idx, t): flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_( flattened_output, name, idx, t ) + + @property + def atol_for_validation(self) -> float: + return 1e-4 diff --git a/src/transformers/onnx/convert.py b/src/transformers/onnx/convert.py index 2da00e2b0dea6c..234724699e82f2 100644 --- a/src/transformers/onnx/convert.py +++ b/src/transformers/onnx/convert.py @@ -450,10 +450,12 @@ def validate_model_outputs( # Values if not np.allclose(ref_value, ort_value, atol=atol): + bad_indices = np.logical_not(np.isclose(ref_value, ort_value, atol=atol)) logger.info(f"\t\t-[x] values not close enough (atol: {atol})") raise ValueError( "Outputs values doesn't match between reference model and ONNX exported model: " - f"Got max absolute difference of: {np.amax(np.abs(ref_value - ort_value))}" + f"Got max absolute difference of: {np.amax(np.abs(ref_value - ort_value))} for " + f"{ref_value[bad_indices]} vs {ort_value[bad_indices]}" ) else: logger.info(f"\t\t-[✓] all values close (atol: {atol})") From 7d5ce6802ec5bab29d60e3501337d3477f31b866 Mon Sep 17 00:00:00 2001 From: wei zhao Date: Mon, 10 Oct 2022 21:16:58 +0800 Subject: [PATCH 512/539] Fix typo in image-classification/README.md (#19424) Fix link typo of the following content. PyTorch version, Trainer PyTorch version, no Trainer --- examples/pytorch/image-classification/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/pytorch/image-classification/README.md b/examples/pytorch/image-classification/README.md index 904981451c6f80..04b4748774ddf7 100644 --- a/examples/pytorch/image-classification/README.md +++ b/examples/pytorch/image-classification/README.md @@ -23,8 +23,8 @@ This directory contains 2 scripts that showcase how to fine-tune any model suppo Try out the inference widget here: https://huggingface.co/google/vit-base-patch16-224 Content: -- [PyTorch version, Trainer](#pytorch-version-no-trainer) -- [PyTorch version, no Trainer](#pytorch-version-trainer) +- [PyTorch version, Trainer](#pytorch-version-trainer) +- [PyTorch version, no Trainer](#pytorch-version-no-trainer) ## PyTorch version, Trainer @@ -208,4 +208,4 @@ This command is the same and will work for: Note that this library is in alpha release so your feedback is more than welcome if you encounter any problem using it. -Regarding using custom data with this script, we refer to [using your own data](#using-your-own-data). \ No newline at end of file +Regarding using custom data with this script, we refer to [using your own data](#using-your-own-data). From 298f6a98c210541fa2ab64f1d13ee9ac801032cc Mon Sep 17 00:00:00 2001 From: Lysandre Debut Date: Mon, 10 Oct 2022 15:19:33 +0200 Subject: [PATCH 513/539] Stop relying on huggingface_hub's private methods (#19392) * Leverage hfh for move cache * Style --- src/transformers/utils/hub.py | 26 +++----------------------- 1 file changed, 3 insertions(+), 23 deletions(-) diff --git a/src/transformers/utils/hub.py b/src/transformers/utils/hub.py index a6ef0c86cdcb7a..582b6cbb4beb5a 100644 --- a/src/transformers/utils/hub.py +++ b/src/transformers/utils/hub.py @@ -34,11 +34,11 @@ HfFolder, create_commit, create_repo, + get_hf_file_metadata, hf_hub_download, hf_hub_url, whoami, ) -from huggingface_hub.constants import HUGGINGFACE_HEADER_X_LINKED_ETAG, HUGGINGFACE_HEADER_X_REPO_COMMIT from huggingface_hub.file_download import REGEX_COMMIT_HASH, http_get from huggingface_hub.utils import ( EntryNotFoundError, @@ -982,26 +982,6 @@ def get_all_cached_files(cache_dir=None): return cached_files -def get_hub_metadata(url, token=None): - """ - Returns the commit hash and associated etag for a given url. - """ - if token is None: - token = HfFolder.get_token() - headers = {"user-agent": http_user_agent()} - headers["authorization"] = f"Bearer {token}" - - r = huggingface_hub.file_download._request_with_retry( - method="HEAD", url=url, headers=headers, allow_redirects=False - ) - hf_raise_for_status(r) - commit_hash = r.headers.get(HUGGINGFACE_HEADER_X_REPO_COMMIT) - etag = r.headers.get(HUGGINGFACE_HEADER_X_LINKED_ETAG) or r.headers.get("ETag") - if etag is not None: - etag = huggingface_hub.file_download._normalize_etag(etag) - return etag, commit_hash - - def extract_info_from_url(url): """ Extract repo_name, revision and filename from an url. @@ -1069,11 +1049,11 @@ def move_cache(cache_dir=None, new_cache_dir=None, token=None): url = file_info.pop("url") if url not in hub_metadata: try: - hub_metadata[url] = get_hub_metadata(url, token=token) + hub_metadata[url] = get_hf_file_metadata(url, use_auth_token=token) except requests.HTTPError: continue - etag, commit_hash = hub_metadata[url] + etag, commit_hash = hub_metadata[url].etag, hub_metadata[url].commit_hash if etag is None or commit_hash is None: continue From 3080bb4754e641b169ee5485441f4f79872f587e Mon Sep 17 00:00:00 2001 From: Mohit Sharma Date: Mon, 10 Oct 2022 18:50:19 +0530 Subject: [PATCH 514/539] Add onnx support for VisionEncoderDecoder (#19254) * Add onnx support for VisionEncoderDecoder * Add onnx support for VisionEncoderDecoder * Removed unused import * Rename encoder hidden state Co-authored-by: lewtun * Update docstrings and removed redundant code * Added test function for enc-dec models * Update doc string text Co-authored-by: lewtun * fixed code style Co-authored-by: lewtun --- docs/source/en/serialization.mdx | 8 ++ .../models/vision_encoder_decoder/__init__.py | 6 +- .../configuration_vision_encoder_decoder.py | 101 ++++++++++++++ src/transformers/onnx/__main__.py | 125 +++++++++++++----- src/transformers/onnx/config.py | 3 +- src/transformers/onnx/features.py | 6 + tests/onnx/test_onnx_v2.py | 91 ++++++++++++- 7 files changed, 305 insertions(+), 35 deletions(-) diff --git a/docs/source/en/serialization.mdx b/docs/source/en/serialization.mdx index c6bd29bc63d8ac..7c89cac4431940 100644 --- a/docs/source/en/serialization.mdx +++ b/docs/source/en/serialization.mdx @@ -96,6 +96,7 @@ Ready-made configurations include the following architectures: - SqueezeBERT - Swin Transformer - T5 +- Vision Encoder decoder - ViT - XLM - XLM-RoBERTa @@ -294,6 +295,13 @@ that can be used for fast autoregressive decoding. + + +For `VisionEncoderDecoder` type models, the encoder and decoder parts are +exported separately as two ONNX files named `encoder_model.onnx` and `decoder_model.onnx` respectively. + + + ## Exporting a model for an unsupported architecture diff --git a/src/transformers/models/vision_encoder_decoder/__init__.py b/src/transformers/models/vision_encoder_decoder/__init__.py index 5d501b8feb83c4..fcb53d9d133756 100644 --- a/src/transformers/models/vision_encoder_decoder/__init__.py +++ b/src/transformers/models/vision_encoder_decoder/__init__.py @@ -27,7 +27,9 @@ ) -_import_structure = {"configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig"]} +_import_structure = { + "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"] +} try: if not is_torch_available(): @@ -54,7 +56,7 @@ _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"] if TYPE_CHECKING: - from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig + from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): diff --git a/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py b/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py index b2c3b2aaccaafc..14ada4c8b7bffc 100644 --- a/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py +++ b/src/transformers/models/vision_encoder_decoder/configuration_vision_encoder_decoder.py @@ -15,12 +15,19 @@ # limitations under the License. import copy +from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict + +from packaging import version from ...configuration_utils import PretrainedConfig +from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig +if TYPE_CHECKING: + from ... import PreTrainedTokenizerBase, TensorType + logger = logging.get_logger(__name__) @@ -119,3 +126,97 @@ def to_dict(self): output["decoder"] = self.decoder.to_dict() output["model_type"] = self.__class__.model_type return output + + +class VisionEncoderDecoderEncoderOnnxConfig(OnnxConfig): + torch_onnx_minimum_version = version.parse("1.11") + + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + return OrderedDict( + [ + ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), + ] + ) + + @property + def atol_for_validation(self) -> float: + return 1e-4 + + @property + def outputs(self) -> Mapping[str, Mapping[int, str]]: + return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}}) + + +class VisionEncoderDecoderDecoderOnnxConfig(OnnxConfig): + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + common_inputs = OrderedDict() + common_inputs["input_ids"] = {0: "batch", 1: "past_decoder_sequence + sequence"} + common_inputs["attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"} + common_inputs["encoder_hidden_states"] = {0: "batch", 1: "encoder_sequence"} + + return common_inputs + + def generate_dummy_inputs( + self, + tokenizer: "PreTrainedTokenizerBase", + batch_size: int = -1, + seq_length: int = -1, + is_pair: bool = False, + framework: Optional["TensorType"] = None, + ) -> Mapping[str, Any]: + import torch + + common_inputs = OrderedDict() + + dummy_input = super().generate_dummy_inputs( + tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework + ) + + batch, encoder_sequence = dummy_input["input_ids"].shape + encoder_hidden_states_shape = (batch, encoder_sequence, self._config.encoder_hidden_size) + common_inputs["input_ids"] = dummy_input.pop("input_ids") + common_inputs["attention_mask"] = dummy_input.pop("attention_mask") + common_inputs["encoder_hidden_states"] = torch.zeros(encoder_hidden_states_shape) + + return common_inputs + + +class VisionEncoderDecoderOnnxConfig(OnnxConfig): + @property + def inputs(self) -> None: + pass + + def get_encoder_config(self, encoder_config: PretrainedConfig) -> OnnxConfig: + r""" + Returns ONNX encoder config for `VisionEncoderDecoder` model. + + Args: + encoder_config (`PretrainedConfig`): + The encoder model's configuration to use when exporting to ONNX. + + Returns: + [`VisionEncoderDecoderEncoderOnnxConfig`]: An instance of the ONNX configuration object + """ + return VisionEncoderDecoderEncoderOnnxConfig(encoder_config) + + def get_decoder_config( + self, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, feature: str = "default" + ) -> OnnxConfig: + r""" + Returns ONNX decoder config for `VisionEncoderDecoder` model. + + Args: + encoder_config (`PretrainedConfig`): + The encoder model's configuration to use when exporting to ONNX. + decoder_config (`PretrainedConfig`): + The decoder model's configuration to use when exporting to ONNX + feature (`str`, *optional*): + The type of feature to export the model with. + + Returns: + [`VisionEncoderDecoderDecoderOnnxConfig`]: An instance of the ONNX configuration object. + """ + decoder_config.encoder_hidden_size = encoder_config.hidden_size + return VisionEncoderDecoderDecoderOnnxConfig(decoder_config, feature) diff --git a/src/transformers/onnx/__main__.py b/src/transformers/onnx/__main__.py index 55ad5f54c99494..b84e12edbb2455 100644 --- a/src/transformers/onnx/__main__.py +++ b/src/transformers/onnx/__main__.py @@ -22,6 +22,9 @@ from .features import FeaturesManager +ENCODER_DECODER_MODELS = ["vision-encoder-decoder"] + + def main(): parser = ArgumentParser("Hugging Face Transformers ONNX exporter") parser.add_argument( @@ -65,48 +68,110 @@ def main(): if not args.output.parent.exists(): args.output.parent.mkdir(parents=True) - # Instantiate the appropriate preprocessor - if args.preprocessor == "auto": - preprocessor = get_preprocessor(args.model) - elif args.preprocessor == "tokenizer": - preprocessor = AutoTokenizer.from_pretrained(args.model) - elif args.preprocessor == "feature_extractor": - preprocessor = AutoFeatureExtractor.from_pretrained(args.model) - elif args.preprocessor == "processor": - preprocessor = AutoProcessor.from_pretrained(args.model) - else: - raise ValueError(f"Unknown preprocessor type '{args.preprocessor}'") - # Allocate the model model = FeaturesManager.get_model_from_feature( args.feature, args.model, framework=args.framework, cache_dir=args.cache_dir ) + model_kind, model_onnx_config = FeaturesManager.check_supported_model_or_raise(model, feature=args.feature) onnx_config = model_onnx_config(model.config) - # Ensure the requested opset is sufficient - if args.opset is None: - args.opset = onnx_config.default_onnx_opset + if model_kind in ENCODER_DECODER_MODELS: + encoder_model = model.get_encoder() + decoder_model = model.get_decoder() - if args.opset < onnx_config.default_onnx_opset: - raise ValueError( - f"Opset {args.opset} is not sufficient to export {model_kind}. " - f"At least {onnx_config.default_onnx_opset} is required." + encoder_onnx_config = onnx_config.get_encoder_config(encoder_model.config) + decoder_onnx_config = onnx_config.get_decoder_config( + encoder_model.config, decoder_model.config, feature=args.feature ) - onnx_inputs, onnx_outputs = export( - preprocessor, - model, - onnx_config, - args.opset, - args.output, - ) + if args.opset is None: + args.opset = max(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset) + + if args.opset < min(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset): + raise ValueError( + f"Opset {args.opset} is not sufficient to export {model_kind}. At least " + f" {min(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset)} is required." + ) + + preprocessor = AutoFeatureExtractor.from_pretrained(args.model) + + onnx_inputs, onnx_outputs = export( + preprocessor, + encoder_model, + encoder_onnx_config, + args.opset, + args.output.parent.joinpath("encoder_model.onnx"), + ) + + validate_model_outputs( + encoder_onnx_config, + preprocessor, + encoder_model, + args.output.parent.joinpath("encoder_model.onnx"), + onnx_outputs, + args.atol if args.atol else encoder_onnx_config.atol_for_validation, + ) + + preprocessor = AutoTokenizer.from_pretrained(args.model) + + onnx_inputs, onnx_outputs = export( + preprocessor, + decoder_model, + decoder_onnx_config, + args.opset, + args.output.parent.joinpath("decoder_model.onnx"), + ) + + validate_model_outputs( + decoder_onnx_config, + preprocessor, + decoder_model, + args.output.parent.joinpath("decoder_model.onnx"), + onnx_outputs, + args.atol if args.atol else decoder_onnx_config.atol_for_validation, + ) + logger.info( + f"All good, model saved at: {args.output.parent.joinpath('encoder_model.onnx').as_posix()}," + f" {args.output.parent.joinpath('decoder_model.onnx').as_posix()}" + ) + + else: + # Instantiate the appropriate preprocessor + if args.preprocessor == "auto": + preprocessor = get_preprocessor(args.model) + elif args.preprocessor == "tokenizer": + preprocessor = AutoTokenizer.from_pretrained(args.model) + elif args.preprocessor == "feature_extractor": + preprocessor = AutoFeatureExtractor.from_pretrained(args.model) + elif args.preprocessor == "processor": + preprocessor = AutoProcessor.from_pretrained(args.model) + else: + raise ValueError(f"Unknown preprocessor type '{args.preprocessor}'") + + # Ensure the requested opset is sufficient + if args.opset is None: + args.opset = onnx_config.default_onnx_opset + + if args.opset < onnx_config.default_onnx_opset: + raise ValueError( + f"Opset {args.opset} is not sufficient to export {model_kind}. " + f"At least {onnx_config.default_onnx_opset} is required." + ) + + onnx_inputs, onnx_outputs = export( + preprocessor, + model, + onnx_config, + args.opset, + args.output, + ) - if args.atol is None: - args.atol = onnx_config.atol_for_validation + if args.atol is None: + args.atol = onnx_config.atol_for_validation - validate_model_outputs(onnx_config, preprocessor, model, args.output, onnx_outputs, args.atol) - logger.info(f"All good, model saved at: {args.output.as_posix()}") + validate_model_outputs(onnx_config, preprocessor, model, args.output, onnx_outputs, args.atol) + logger.info(f"All good, model saved at: {args.output.as_posix()}") if __name__ == "__main__": diff --git a/src/transformers/onnx/config.py b/src/transformers/onnx/config.py index 3b789051a2203f..dc27c0b6924db5 100644 --- a/src/transformers/onnx/config.py +++ b/src/transformers/onnx/config.py @@ -103,6 +103,7 @@ class OnnxConfig(ABC): "seq2seq-lm": OrderedDict({"logits": {0: "batch", 1: "decoder_sequence"}}), "sequence-classification": OrderedDict({"logits": {0: "batch"}}), "token-classification": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), + "vision2seq-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), } def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: List[PatchingSpec] = None): @@ -451,7 +452,6 @@ def generate_dummy_inputs( is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: - # TODO: should we set seq_length = 1 when self.use_past = True? common_inputs = super().generate_dummy_inputs( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework @@ -577,7 +577,6 @@ def generate_dummy_inputs( is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: - encoder_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) diff --git a/src/transformers/onnx/features.py b/src/transformers/onnx/features.py index 4d1af87465fbcf..6a0ec0f7c70794 100644 --- a/src/transformers/onnx/features.py +++ b/src/transformers/onnx/features.py @@ -30,6 +30,7 @@ AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, + AutoModelForVision2Seq, ) if is_tf_available(): from transformers.models.auto import ( @@ -98,6 +99,7 @@ class FeaturesManager: "image-segmentation": AutoModelForImageSegmentation, "masked-im": AutoModelForMaskedImageModeling, "semantic-segmentation": AutoModelForSemanticSegmentation, + "vision2seq-lm": AutoModelForVision2Seq, } if is_tf_available(): _TASKS_TO_TF_AUTOMODELS = { @@ -481,6 +483,9 @@ class FeaturesManager: "seq2seq-lm-with-past", onnx_config_cls="models.t5.T5OnnxConfig", ), + "vision-encoder-decoder": supported_features_mapping( + "vision2seq-lm", onnx_config_cls="models.vision_encoder_decoder.VisionEncoderDecoderOnnxConfig" + ), "vit": supported_features_mapping( "default", "image-classification", "masked-im", onnx_config_cls="models.vit.ViTOnnxConfig" ), @@ -582,6 +587,7 @@ def get_model_class_for_feature(feature: str, framework: str = "pt") -> Type: raise KeyError( f"Unknown task: {feature}. Possible values are {list(FeaturesManager._TASKS_TO_AUTOMODELS.values())}" ) + return task_to_automodel[task] @staticmethod diff --git a/tests/onnx/test_onnx_v2.py b/tests/onnx/test_onnx_v2.py index 4c15a68867895c..81cd55d3bb5a81 100644 --- a/tests/onnx/test_onnx_v2.py +++ b/tests/onnx/test_onnx_v2.py @@ -161,7 +161,6 @@ def test_values_override(self): """ for name, config in OnnxConfigWithPastTestCaseV2.SUPPORTED_WITH_PAST_CONFIGS: with self.subTest(name): - # without past onnx_config_default = OnnxConfigWithPast.from_model_config(config()) self.assertIsNotNone(onnx_config_default.values_override, "values_override should not be None") @@ -220,6 +219,10 @@ def test_values_override(self): ("swin", "microsoft/swin-tiny-patch4-window7-224"), } +PYTORCH_EXPORT_ENCODER_DECODER_MODELS = { + ("vision-encoder-decoder", "nlpconnect/vit-gpt2-image-captioning"), +} + PYTORCH_EXPORT_WITH_PAST_MODELS = { ("bloom", "bigscience/bloom-560m"), ("gpt2", "gpt2"), @@ -347,6 +350,70 @@ def _onnx_export( except (RuntimeError, ValueError) as e: self.fail(f"{name}, {feature} -> {e}") + def _onnx_export_encoder_decoder_models( + self, test_name, name, model_name, feature, onnx_config_class_constructor, device="cpu" + ): + from transformers import AutoFeatureExtractor, AutoTokenizer + from transformers.onnx import export + + model_class = FeaturesManager.get_model_class_for_feature(feature) + config = AutoConfig.from_pretrained(model_name) + model = model_class.from_config(config) + + onnx_config = onnx_config_class_constructor(model.config) + + if is_torch_available(): + from transformers.utils import torch_version + + if torch_version < onnx_config.torch_onnx_minimum_version: + pytest.skip( + "Skipping due to incompatible PyTorch version. Minimum required is" + f" {onnx_config.torch_onnx_minimum_version}, got: {torch_version}" + ) + + encoder_model = model.get_encoder() + decoder_model = model.get_decoder() + + encoder_onnx_config = onnx_config.get_encoder_config(encoder_model.config) + decoder_onnx_config = onnx_config.get_decoder_config(encoder_model.config, decoder_model.config, feature) + + preprocessor = AutoFeatureExtractor.from_pretrained(model_name) + + onnx_opset = max(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset) + + with NamedTemporaryFile("w") as encoder_output: + onnx_inputs, onnx_outputs = export( + preprocessor, encoder_model, encoder_onnx_config, onnx_opset, Path(encoder_output.name), device=device + ) + validate_model_outputs( + encoder_onnx_config, + preprocessor, + encoder_model, + Path(encoder_output.name), + onnx_outputs, + encoder_onnx_config.atol_for_validation, + ) + + preprocessor = AutoTokenizer.from_pretrained(model_name) + + with NamedTemporaryFile("w") as decoder_output: + onnx_inputs, onnx_outputs = export( + preprocessor, + decoder_model, + decoder_onnx_config, + onnx_config.default_onnx_opset, + Path(decoder_output.name), + device=device, + ) + validate_model_outputs( + decoder_onnx_config, + preprocessor, + decoder_model, + Path(decoder_output.name), + onnx_outputs, + decoder_onnx_config.atol_for_validation, + ) + @parameterized.expand(_get_models_to_test(PYTORCH_EXPORT_MODELS)) @slow @require_torch @@ -363,6 +430,28 @@ def test_pytorch_export(self, test_name, name, model_name, feature, onnx_config_ def test_pytorch_export_on_cuda(self, test_name, name, model_name, feature, onnx_config_class_constructor): self._onnx_export(test_name, name, model_name, feature, onnx_config_class_constructor, device="cuda") + @parameterized.expand(_get_models_to_test(PYTORCH_EXPORT_ENCODER_DECODER_MODELS)) + @slow + @require_torch + @require_vision + @require_rjieba + def test_pytorch_export_encoder_decoder_models( + self, test_name, name, model_name, feature, onnx_config_class_constructor + ): + self._onnx_export_encoder_decoder_models(test_name, name, model_name, feature, onnx_config_class_constructor) + + @parameterized.expand(_get_models_to_test(PYTORCH_EXPORT_ENCODER_DECODER_MODELS)) + @slow + @require_torch + @require_vision + @require_rjieba + def test_pytorch_export_encoder_decoder_models_on_cuda( + self, test_name, name, model_name, feature, onnx_config_class_constructor + ): + self._onnx_export_encoder_decoder_models( + test_name, name, model_name, feature, onnx_config_class_constructor, device="cuda" + ) + @parameterized.expand(_get_models_to_test(PYTORCH_EXPORT_WITH_PAST_MODELS)) @slow @require_torch From 4824741c4cc3aeb04929a41b8c89b2b1fc57fca6 Mon Sep 17 00:00:00 2001 From: Ryan Chan Date: Mon, 10 Oct 2022 14:25:22 +0100 Subject: [PATCH 515/539] Remove dependency of Roberta in Blenderbot (#19411) * Remove dependency of Roberta in Blenderbot * Move Copied from statements to each method of the Roberta classes * Remove copied from line for mask_token.setter * update output from example in docs --- .../blenderbot/tokenization_blenderbot.py | 373 ++++++++++++++++-- .../tokenization_blenderbot_fast.py | 240 ++++++++++- 2 files changed, 575 insertions(+), 38 deletions(-) diff --git a/src/transformers/models/blenderbot/tokenization_blenderbot.py b/src/transformers/models/blenderbot/tokenization_blenderbot.py index 8fabbbf6f24a56..ef0a24e06d9ae0 100644 --- a/src/transformers/models/blenderbot/tokenization_blenderbot.py +++ b/src/transformers/models/blenderbot/tokenization_blenderbot.py @@ -14,10 +14,15 @@ # limitations under the License. """Tokenization class for Blenderbot.""" -from typing import TYPE_CHECKING, List, Optional +import json +import os +from functools import lru_cache +from typing import TYPE_CHECKING, List, Optional, Tuple +import regex as re + +from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging -from ..roberta.tokenization_roberta import RobertaTokenizer if TYPE_CHECKING: @@ -43,32 +48,362 @@ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128} -class BlenderbotTokenizer(RobertaTokenizer): - r""" - Construct a Blenderbot tokenizer. +@lru_cache() +# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode +def bytes_to_unicode(): + """ + Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control + characters the bpe code barfs on. + + The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab + if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for + decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup + tables between utf-8 bytes and unicode strings. + """ + bs = ( + list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) + ) + cs = bs[:] + n = 0 + for b in range(2**8): + if b not in bs: + bs.append(b) + cs.append(2**8 + n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +# Copied from transformers.models.roberta.tokenization_roberta.get_pairs +def get_pairs(word): + """ + Return set of symbol pairs in a word. + + Word is represented as tuple of symbols (symbols being variable-length strings). + """ + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +class BlenderbotTokenizer(PreTrainedTokenizer): + """ + Constructs a Blenderbot tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding. + + This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will + be encoded differently whether it is at the beginning of the sentence (without space) or not: + + ``` + >>> from transformers import BlenderbotTokenizer + >>> tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B") + >>> tokenizer("Hello world")['input_ids'] + [6950, 1085, 2] + >>> tokenizer(" Hello world")['input_ids'] + [6950, 1085, 2] + ``` + + You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you + call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. - [`Blenderbot`] is nearly identical to [`RobertaTokenizer`] and runs end-to-end tokenization: punctuation splitting - and wordpiece. The only difference is that it doesn't add BOS token to the beginning of sequences. + - Refer to superclass [`RobertaTokenizer`] for usage examples and documentation concerning parameters. + When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one). + + + + This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to + this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + Path to the vocabulary file. + merges_file (`str`): + Path to the merges file. + errors (`str`, *optional*, defaults to `"replace"`): + Paradigm to follow when decoding bytes to UTF-8. See + [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. + bos_token (`str`, *optional*, defaults to `""`): + The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. + + + + When building a sequence using special tokens, this is not the token that is used for the beginning of + sequence. The token used is the `cls_token`. + + + + eos_token (`str`, *optional*, defaults to `""`): + The end of sequence token. + + + + When building a sequence using special tokens, this is not the token that is used for the end of sequence. + The token used is the `sep_token`. + + + + sep_token (`str`, *optional*, defaults to `""`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + cls_token (`str`, *optional*, defaults to `""`): + The classifier token which is used when doing sequence classification (classification of the whole sequence + instead of per-token classification). It is the first token of the sequence when built with special tokens. + unk_token (`str`, *optional*, defaults to `""`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + pad_token (`str`, *optional*, defaults to `""`): + The token used for padding, for example when batching sequences of different lengths. + mask_token (`str`, *optional*, defaults to `""`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. + add_prefix_space (`bool`, *optional*, defaults to `False`): + Whether or not to add an initial space to the input. This allows to treat the leading word just as any + other word. (Blenderbot tokenizer detect beginning of words by the preceding space). """ + vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + model_input_names = ["input_ids", "attention_mask"] + + # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.__init__ with Roberta->Blenderbot, RoBERTa->Blenderbot + def __init__( + self, + vocab_file, + merges_file, + errors="replace", + bos_token="", + eos_token="", + sep_token="", + cls_token="", + unk_token="", + pad_token="", + mask_token="", + add_prefix_space=False, + **kwargs + ): + bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token + eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token + sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token + cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token + unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token + pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token + + # Mask token behave like a normal word, i.e. include the space before it + mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token + + super().__init__( + errors=errors, + bos_token=bos_token, + eos_token=eos_token, + unk_token=unk_token, + sep_token=sep_token, + cls_token=cls_token, + pad_token=pad_token, + mask_token=mask_token, + add_prefix_space=add_prefix_space, + **kwargs, + ) + + with open(vocab_file, encoding="utf-8") as vocab_handle: + self.encoder = json.load(vocab_handle) + self.decoder = {v: k for k, v in self.encoder.items()} + self.errors = errors # how to handle errors in decoding + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + with open(merges_file, encoding="utf-8") as merges_handle: + bpe_merges = merges_handle.read().split("\n")[1:-1] + bpe_merges = [tuple(merge.split()) for merge in bpe_merges] + self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) + self.cache = {} + self.add_prefix_space = add_prefix_space + + # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions + self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") + + @property + # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot + def vocab_size(self): + return len(self.encoder) + + # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.get_vocab with Roberta->Blenderbot, RoBERTa->Blenderbot + def get_vocab(self): + return dict(self.encoder, **self.added_tokens_encoder) + + # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.bpe with Roberta->Blenderbot, RoBERTa->Blenderbot + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token) + pairs = get_pairs(word) + + if not pairs: + return token + + while True: + bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + except ValueError: + new_word.extend(word[i:]) + break + else: + new_word.extend(word[i:j]) + i = j + + if word[i] == first and i < len(word) - 1 and word[i + 1] == second: + new_word.append(first + second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = " ".join(word) + self.cache[token] = word + return word + + # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer._tokenize with Roberta->Blenderbot, RoBERTa->Blenderbot + def _tokenize(self, text): + """Tokenize a string.""" + bpe_tokens = [] + for token in re.findall(self.pat, text): + token = "".join( + self.byte_encoder[b] for b in token.encode("utf-8") + ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) + bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" ")) + return bpe_tokens + + # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer._convert_token_to_id with Roberta->Blenderbot, RoBERTa->Blenderbot + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.encoder.get(token, self.encoder.get(self.unk_token)) + + # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer._convert_id_to_token with Roberta->Blenderbot, RoBERTa->Blenderbot + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.decoder.get(index) + + # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.convert_tokens_to_string with Roberta->Blenderbot, RoBERTa->Blenderbot + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + text = "".join(tokens) + text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors) + return text + + # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.save_vocabulary with Roberta->Blenderbot, RoBERTa->Blenderbot + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + if not os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + merge_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] + ) + + with open(vocab_file, "w", encoding="utf-8") as f: + f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") + + index = 0 + with open(merge_file, "w", encoding="utf-8") as writer: + writer.write("#version: 0.2\n") + for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): + if index != token_index: + logger.warning( + f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." + " Please check that the tokenizer is not corrupted!" + ) + index = token_index + writer.write(" ".join(bpe_tokens) + "\n") + index += 1 + + return vocab_file, merge_file + + # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.get_special_tokens_mask with Roberta->Blenderbot, RoBERTa->Blenderbot + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + if token_ids_1 is None: + return [1] + ([0] * len(token_ids_0)) + [1] + return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] + + # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.create_token_type_ids_from_sequences with Roberta->Blenderbot, RoBERTa->Blenderbot + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. Blenderbot does + not make use of token type ids, therefore a list of zeros is returned. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of zeros. + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] + + # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.prepare_for_tokenization with Roberta->Blenderbot, RoBERTa->Blenderbot + def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): + add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space) + if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()): + text = " " + text + return (text, kwargs) def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A Blenderbot sequence has the following format: - - single sequence: ` X ` - Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added token_ids_1 (`List[int]`, *optional*): Will be ignored - Returns: `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ @@ -90,19 +425,3 @@ def _build_conversation_input_ids(self, conversation: "Conversation") -> List[in input_ids = input_ids[-self.model_max_length :] logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.") return input_ids - - -def get_pairs(word): - """ - Return set of symbol pairs in a word. - - Word is represented as tuple of symbols (symbols being variable-length strings). - """ - pairs = set() - prev_char = word[0] - for char in word[1:]: - pairs.add((prev_char, char)) - prev_char = char - - pairs = set(pairs) - return pairs diff --git a/src/transformers/models/blenderbot/tokenization_blenderbot_fast.py b/src/transformers/models/blenderbot/tokenization_blenderbot_fast.py index 258068b9c343f9..508c643f4d8236 100644 --- a/src/transformers/models/blenderbot/tokenization_blenderbot_fast.py +++ b/src/transformers/models/blenderbot/tokenization_blenderbot_fast.py @@ -13,11 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. """Fast Tokenization class for Blenderbot.""" +import json +from typing import TYPE_CHECKING, List, Optional, Tuple -from typing import TYPE_CHECKING, List, Optional +from tokenizers import pre_tokenizers, processors +from ...tokenization_utils_base import AddedToken, BatchEncoding +from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging -from ..roberta.tokenization_roberta_fast import RobertaTokenizerFast from .tokenization_blenderbot import BlenderbotTokenizer @@ -44,33 +47,248 @@ PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128} -class BlenderbotTokenizerFast(RobertaTokenizerFast): - r""" - Construct a "fast" Blenderbot tokenizer (backed by HuggingFace's *tokenizers* library). +class BlenderbotTokenizerFast(PreTrainedTokenizerFast): + """ + Construct a "fast" Blenderbot tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2 + tokenizer, using byte-level Byte-Pair-Encoding. + + This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will + be encoded differently whether it is at the beginning of the sentence (without space) or not: + + ``` + >>> from transformers import BlenderbotTokenizerFast + >>> tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B") + >>> tokenizer("Hello world")['input_ids'] + [6950, 1085, 2] + >>> tokenizer(" Hello world")['input_ids'] + [6950, 1085, 2] + ``` + + You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you + call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. + + + + When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`. + + + + This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should + refer to this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + Path to the vocabulary file. + merges_file (`str`): + Path to the merges file. + errors (`str`, *optional*, defaults to `"replace"`): + Paradigm to follow when decoding bytes to UTF-8. See + [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. + bos_token (`str`, *optional*, defaults to `""`): + The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. + + + + When building a sequence using special tokens, this is not the token that is used for the beginning of + sequence. The token used is the `cls_token`. + + + + eos_token (`str`, *optional*, defaults to `""`): + The end of sequence token. - [`BlenderbotFast`] is nearly identical to [`RobertaTokenizerFast`] and runs end-to-end tokenization: punctuation - splitting and wordpiece. The only difference is that it doesn't add BOS token to the beginning of sequences. + - Refer to superclass [`RobertaTokenizerFast`] for usage examples and documentation concerning parameters. + When building a sequence using special tokens, this is not the token that is used for the end of sequence. + The token used is the `sep_token`. + + + + sep_token (`str`, *optional*, defaults to `""`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + cls_token (`str`, *optional*, defaults to `""`): + The classifier token which is used when doing sequence classification (classification of the whole sequence + instead of per-token classification). It is the first token of the sequence when built with special tokens. + unk_token (`str`, *optional*, defaults to `""`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + pad_token (`str`, *optional*, defaults to `""`): + The token used for padding, for example when batching sequences of different lengths. + mask_token (`str`, *optional*, defaults to `""`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. + add_prefix_space (`bool`, *optional*, defaults to `False`): + Whether or not to add an initial space to the input. This allows to treat the leading word just as any + other word. (Blenderbot tokenizer detect beginning of words by the preceding space). + trim_offsets (`bool`, *optional*, defaults to `True`): + Whether the post processing step should trim offsets to avoid including whitespaces. """ + vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = BlenderbotTokenizer + # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.__init__ with Roberta->Blenderbot, RoBERTa->Blenderbot + def __init__( + self, + vocab_file=None, + merges_file=None, + tokenizer_file=None, + errors="replace", + bos_token="", + eos_token="", + sep_token="", + cls_token="", + unk_token="", + pad_token="", + mask_token="", + add_prefix_space=False, + trim_offsets=True, + **kwargs + ): + super().__init__( + vocab_file, + merges_file, + tokenizer_file=tokenizer_file, + errors=errors, + bos_token=bos_token, + eos_token=eos_token, + sep_token=sep_token, + cls_token=cls_token, + unk_token=unk_token, + pad_token=pad_token, + mask_token=mask_token, + add_prefix_space=add_prefix_space, + trim_offsets=trim_offsets, + **kwargs, + ) + + pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) + if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space: + pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type")) + pre_tok_state["add_prefix_space"] = add_prefix_space + self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state) + + self.add_prefix_space = add_prefix_space + + tokenizer_component = "post_processor" + tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None) + if tokenizer_component_instance: + state = json.loads(tokenizer_component_instance.__getstate__()) + + # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` + if "sep" in state: + state["sep"] = tuple(state["sep"]) + if "cls" in state: + state["cls"] = tuple(state["cls"]) + + changes_to_apply = False + + if state.get("add_prefix_space", add_prefix_space) != add_prefix_space: + state["add_prefix_space"] = add_prefix_space + changes_to_apply = True + + if state.get("trim_offsets", trim_offsets) != trim_offsets: + state["trim_offsets"] = trim_offsets + changes_to_apply = True + + if changes_to_apply: + component_class = getattr(processors, state.pop("type")) + new_value = component_class(**state) + setattr(self.backend_tokenizer, tokenizer_component, new_value) + + @property + # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot + def mask_token(self) -> str: + """ + `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not + having been set. + + Blenderbot tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will + greedily comprise the space before the **. + """ + if self._mask_token is None: + if self.verbose: + logger.error("Using mask_token, but it is not set yet.") + return None + return str(self._mask_token) + + @mask_token.setter + def mask_token(self, value): + """ + Overriding the default behavior of the mask token to have it eat the space before it. + + This is needed to preserve backward compatibility with all the previously used models based on Roberta. + """ + # Mask token behave like a normal word, i.e. include the space before it + # So we set lstrip to True + value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value + self._mask_token = value + + # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast._batch_encode_plus with Roberta->Blenderbot, RoBERTa->Blenderbot + def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: + is_split_into_words = kwargs.get("is_split_into_words", False) + assert self.add_prefix_space or not is_split_into_words, ( + f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " + "to use it with pretokenized inputs." + ) + + return super()._batch_encode_plus(*args, **kwargs) + + # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast._encode_plus with Roberta->Blenderbot, RoBERTa->Blenderbot + def _encode_plus(self, *args, **kwargs) -> BatchEncoding: + is_split_into_words = kwargs.get("is_split_into_words", False) + + assert self.add_prefix_space or not is_split_into_words, ( + f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " + "to use it with pretokenized inputs." + ) + + return super()._encode_plus(*args, **kwargs) + + # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.save_vocabulary with Roberta->Blenderbot, RoBERTa->Blenderbot + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + files = self._tokenizer.model.save(save_directory, name=filename_prefix) + return tuple(files) + + # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.create_token_type_ids_from_sequences with Roberta->Blenderbot, RoBERTa->Blenderbot + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. Blenderbot does + not make use of token type ids, therefore a list of zeros is returned. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of zeros. + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] + def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A Blenderbot sequence has the following format: - - single sequence: ` X ` - Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added token_ids_1 (`List[int]`, *optional*): Will be ignored - Returns: `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ From ba71bf4caedaa90a11f02735eb8bb375aabe70f5 Mon Sep 17 00:00:00 2001 From: Aritra Roy Gosthipaty Date: Mon, 10 Oct 2022 18:56:36 +0530 Subject: [PATCH 516/539] fix: renamed variable name (#18850) The sequence_masked variable is actually the part of the sequence that is kept unmasked for the encoder. This commit renames the variable. --- src/transformers/models/vit_mae/modeling_tf_vit_mae.py | 4 ++-- src/transformers/models/vit_mae/modeling_vit_mae.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/transformers/models/vit_mae/modeling_tf_vit_mae.py b/src/transformers/models/vit_mae/modeling_tf_vit_mae.py index 3cb702b4f6a4bd..6ecec70623b280 100644 --- a/src/transformers/models/vit_mae/modeling_tf_vit_mae.py +++ b/src/transformers/models/vit_mae/modeling_tf_vit_mae.py @@ -254,7 +254,7 @@ def random_masking(self, sequence: tf.Tensor, noise: Optional[tf.Tensor] = None) # keep the first subset ids_keep = ids_shuffle[:, :len_keep] - sequence_masked = tf.gather( + sequence_unmasked = tf.gather( sequence, axis=1, batch_dims=1, @@ -271,7 +271,7 @@ def random_masking(self, sequence: tf.Tensor, noise: Optional[tf.Tensor] = None) # unshuffle to get the binary mask mask = tf.gather(mask, axis=1, batch_dims=1, indices=ids_restore) - return sequence_masked, mask, ids_restore + return sequence_unmasked, mask, ids_restore def call(self, pixel_values: tf.Tensor, noise: tf.Tensor = None) -> tf.Tensor: embeddings = self.patch_embeddings(pixel_values) diff --git a/src/transformers/models/vit_mae/modeling_vit_mae.py b/src/transformers/models/vit_mae/modeling_vit_mae.py index d55ef54473b534..c8d68828221828 100755 --- a/src/transformers/models/vit_mae/modeling_vit_mae.py +++ b/src/transformers/models/vit_mae/modeling_vit_mae.py @@ -251,7 +251,7 @@ def random_masking(self, sequence, noise=None): # keep the first subset ids_keep = ids_shuffle[:, :len_keep] - sequence_masked = torch.gather(sequence, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, dim)) + sequence_unmasked = torch.gather(sequence, dim=1, index=ids_keep.unsqueeze(-1).repeat(1, 1, dim)) # generate the binary mask: 0 is keep, 1 is remove mask = torch.ones([batch_size, seq_length], device=sequence.device) @@ -259,7 +259,7 @@ def random_masking(self, sequence, noise=None): # unshuffle to get the binary mask mask = torch.gather(mask, dim=1, index=ids_restore) - return sequence_masked, mask, ids_restore + return sequence_unmasked, mask, ids_restore def forward(self, pixel_values, noise=None): batch_size, num_channels, height, width = pixel_values.shape From af69360bf96946e6d4d321d96ec7dea991c5cbbf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?APAVOU=20Cl=C3=A9ment?= <45719060+clementapa@users.noreply.github.com> Date: Mon, 10 Oct 2022 15:30:59 +0200 Subject: [PATCH 517/539] Add `OPTForQuestionAnswering` (#19402) * Add `OPTForQuestionAnswering` - added `OPTForQuestionAnswering` class based on `BloomForQuestionAnswering` - added `OPTForQuestionAnswering` in common tests - all common tests pass - make fixup done * added docstrings for OPTForQuestionAnswering * Fix docstrings for OPTForQuestionAnswering --- docs/source/en/model_doc/opt.mdx | 5 + src/transformers/__init__.py | 2 + src/transformers/models/auto/modeling_auto.py | 1 + src/transformers/models/opt/__init__.py | 2 + src/transformers/models/opt/modeling_opt.py | 121 +++++++++++++++++- src/transformers/utils/dummy_pt_objects.py | 7 + tests/models/opt/test_modeling_opt.py | 14 +- 7 files changed, 149 insertions(+), 3 deletions(-) diff --git a/docs/source/en/model_doc/opt.mdx b/docs/source/en/model_doc/opt.mdx index 4ab9436b04ef35..612689678f8835 100644 --- a/docs/source/en/model_doc/opt.mdx +++ b/docs/source/en/model_doc/opt.mdx @@ -59,6 +59,11 @@ The original code can be found [here](https://github.com/facebookresearch/metase [[autodoc]] OPTForSequenceClassification - forward +## OPTForQuestionAnswering + +[[autodoc]] OPTForQuestionAnswering + - forward + ## FlaxOPTModel [[autodoc]] FlaxOPTModel diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 370a347c9fcc10..0fd95663a57a70 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -1661,6 +1661,7 @@ "OPTModel", "OPTPreTrainedModel", "OPTForSequenceClassification", + "OPTForQuestionAnswering", ] ) _import_structure["models.owlvit"].extend( @@ -4408,6 +4409,7 @@ from .models.opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, + OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, diff --git a/src/transformers/models/auto/modeling_auto.py b/src/transformers/models/auto/modeling_auto.py index 237c98c5bb4ab6..edd61e1da9b6f3 100644 --- a/src/transformers/models/auto/modeling_auto.py +++ b/src/transformers/models/auto/modeling_auto.py @@ -611,6 +611,7 @@ ("mvp", "MvpForQuestionAnswering"), ("nezha", "NezhaForQuestionAnswering"), ("nystromformer", "NystromformerForQuestionAnswering"), + ("opt", "OPTForQuestionAnswering"), ("qdqbert", "QDQBertForQuestionAnswering"), ("reformer", "ReformerForQuestionAnswering"), ("rembert", "RemBertForQuestionAnswering"), diff --git a/src/transformers/models/opt/__init__.py b/src/transformers/models/opt/__init__.py index 4e5508640972a7..c5a4533c03b543 100644 --- a/src/transformers/models/opt/__init__.py +++ b/src/transformers/models/opt/__init__.py @@ -41,6 +41,7 @@ "OPTModel", "OPTPreTrainedModel", "OPTForSequenceClassification", + "OPTForQuestionAnswering", ] try: @@ -76,6 +77,7 @@ from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, + OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, diff --git a/src/transformers/models/opt/modeling_opt.py b/src/transformers/models/opt/modeling_opt.py index 9ede3cabb8f762..acea546a207e02 100644 --- a/src/transformers/models/opt/modeling_opt.py +++ b/src/transformers/models/opt/modeling_opt.py @@ -22,7 +22,12 @@ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN -from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast +from ...modeling_outputs import ( + BaseModelOutputWithPast, + CausalLMOutputWithPast, + QuestionAnsweringModelOutput, + SequenceClassifierOutputWithPast, +) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, @@ -48,6 +53,11 @@ _SEQ_CLASS_EXPECTED_LOSS = 1.71 _SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_0'" +# QuestionAnswering docstring +_QA_EXPECTED_OUTPUT = "'a nice puppet'" +_QA_EXPECTED_LOSS = 7.41 +_QA_TARGET_START_INDEX = 14 +_QA_TARGET_END_INDEX = 15 OPT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/opt-125m", @@ -1109,3 +1119,112 @@ def get_input_embeddings(self): def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value + + +@add_start_docstrings( + """ + The OPT Model transformer with a span classification head on top for extractive question-answering tasks like SQuAD + (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). + """, + OPT_START_DOCSTRING, +) +class OPTForQuestionAnswering(OPTPreTrainedModel): + _keys_to_ignore_on_load_missing = [r"lm_head.weight"] + + def __init__(self, config: OPTConfig): + super().__init__(config) + self.model = OPTModel(config) + self.qa_outputs = nn.Linear(config.word_embed_proj_dim, 2) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(OPT_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + processor_class=_TOKENIZER_FOR_DOC, + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=QuestionAnsweringModelOutput, + config_class=_CONFIG_FOR_DOC, + qa_target_start_index=_QA_TARGET_START_INDEX, + qa_target_end_index=_QA_TARGET_END_INDEX, + expected_output=_QA_EXPECTED_OUTPUT, + expected_loss=_QA_EXPECTED_LOSS, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + start_positions: Optional[torch.LongTensor] = None, + end_positions: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, QuestionAnsweringModelOutput]: + r""" + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.model( + input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = transformer_outputs[0] + + logits = self.qa_outputs(hidden_states) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1).contiguous() + end_logits = end_logits.squeeze(-1).contiguous() + + total_loss = None + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions = start_positions.clamp(0, ignored_index) + end_positions = end_positions.clamp(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if not return_dict: + output = (start_logits, end_logits) + transformer_outputs[2:] + return ((total_loss,) + output) if total_loss is not None else output + + return QuestionAnsweringModelOutput( + loss=total_loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) + + def get_input_embeddings(self): + return self.model.decoder.embed_tokens + + def set_input_embeddings(self, value): + self.model.decoder.embed_tokens = value diff --git a/src/transformers/utils/dummy_pt_objects.py b/src/transformers/utils/dummy_pt_objects.py index 72db36cab91383..1546700c5c1231 100644 --- a/src/transformers/utils/dummy_pt_objects.py +++ b/src/transformers/utils/dummy_pt_objects.py @@ -3714,6 +3714,13 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) +class OPTForQuestionAnswering(metaclass=DummyObject): + _backends = ["torch"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["torch"]) + + class OPTForSequenceClassification(metaclass=DummyObject): _backends = ["torch"] diff --git a/tests/models/opt/test_modeling_opt.py b/tests/models/opt/test_modeling_opt.py index bdf3716b597d6b..6b9311b09b2790 100644 --- a/tests/models/opt/test_modeling_opt.py +++ b/tests/models/opt/test_modeling_opt.py @@ -32,7 +32,13 @@ if is_torch_available(): import torch - from transformers import GPT2Tokenizer, OPTForCausalLM, OPTForSequenceClassification, OPTModel + from transformers import ( + GPT2Tokenizer, + OPTForCausalLM, + OPTForQuestionAnswering, + OPTForSequenceClassification, + OPTModel, + ) def prepare_opt_inputs_dict( @@ -178,7 +184,11 @@ def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): @require_torch class OPTModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): - all_model_classes = (OPTModel, OPTForCausalLM, OPTForSequenceClassification) if is_torch_available() else () + all_model_classes = ( + (OPTModel, OPTForCausalLM, OPTForSequenceClassification, OPTForQuestionAnswering) + if is_torch_available() + else () + ) all_generative_model_classes = (OPTForCausalLM,) if is_torch_available() else () is_encoder_decoder = False fx_compatible = True From e3f028f3af9e2fd42bb8cf52ec1bcf720b6fbaf1 Mon Sep 17 00:00:00 2001 From: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Date: Mon, 10 Oct 2022 14:48:17 +0100 Subject: [PATCH 518/539] Add TF whisper (#19378) * simplify loop * add featur extractor * add model * start conversion * add dropout * initial commit of test files * copnversion for all models * update processor for correct padding * update feature extraction * update integration test logits match * fmnt: off for the logits * on the fly mel bank * small nit * update test * update tokenizer * nit feature extraction * update * update tokenizer test * adds logit processor and update tokenizer to get supress tokens * style * clean convert * revert to original modeling tf utils * Update * update * nit * clean convert file * update tests and nits * quality * slow generation test * ffn_dim to allow customization * update readme * add to toctreee * start fixing integration tests * update tests and code * fix feature extractor * fix config tests common * update code to fix tests * fix feature exctractor * nit feature extraction * update test for new feature extractor * style * add absrtact * large logits wioth custom decoder input ids * wraap around is otrch available * fix feature extractor * correct logits for whisper small.en * nit * fix encoder_attentino_mask * some fixes * remove unnecessary inputs * nits * add normalizer file * update etst tokenization * fix attention mask not defined * fix generate * remove uncoder attention mask useless * update test modeling whisper * update condfig to add second non supress tokens * nits on feature exrtactor * nit for test tokenizers * update etsts * update tests * update tokenization test * fixup * invalidated hf token. Clean convert openai to whisper * fix logit tests * fixup * Add model to README * Fix doc tests * clean merge * revert toc_tree changes * remove useless LogitProcessor * Update whisper .mdx * update config file doc * update configuration docstring * update test tokenization * update test tokenization * update tokenization whisper Added copied from where needed * update feature extraction * nit test name * style * quality * remove get suppress tokens and update non_speech tokens global variables * Update src/transformers/models/whisper/feature_extraction_whisper.py Co-authored-by: Patrick von Platen * clean modeling whisper and test Removed the attention mask arguments that are deprecated * fix large test * Add multilingual audio test, and translate test * style * fix larg multilingual test * nits * add copied from for attention layer * remove attention masks in doc * add english normalizer * Update docs/source/en/model_doc/whisper.mdx Co-authored-by: Patrick von Platen * update tokenization test * remove copied from in whisper attention : no bias in k_proj only * wrap around dependencies in english normalizer * style * correct import generation logits * for now, wrap feature extractor with torch * remove torch depencies for feature extraction and style * Update src/transformers/models/whisper/convert_openai_whisper_to_tfms.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/models/whisper/configuration_whisper.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update docs/source/en/model_doc/whisper.mdx Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * fixup * nit * update logitds * style * nit * nits and fix final tests * add `is_more_itertools_available` to utils * quality * add begin supress tokens, supress tokens to generate args and config * clean supressTokensLogitProcessor in generation logits * Nit naming * add supressTokensAtBegin * udpate tests, supress tokens to None or correct values * nit and style * update RAG to fit test and generate_logit * add copy pasted statment on english normalizer * add arguments to config_common_kwargs * Update src/transformers/generation_utils.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Update src/transformers/generation_logits_process.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * revert changes based on reviews * update doc and nits * Update src/transformers/models/whisper/configuration_whisper.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * Apply suggestions from code review Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: Patrick von Platen Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * more nits * last nits * update test configuration common * add BART name in decoder attention mask documentation * Update src/transformers/models/whisper/modeling_whisper.py Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> * style * nit * nit * add english.json file to git * nits on documentation * nit * nits * last styling * add main toctree file * remove sentence piece dependency * clean init file * fix tokenizer that has no dependencies on sentencepiece * update whisper init file, nit * remove english.json file * add get decoder prompt id * All weights loading * Remove hanging pdb * Fixup and tidy up * Use same copied from as PT model * Remove whitespace changes * Remove torch references * Tie embeddings * Remove logits processor input to generate * Update logit values * revert changes and add forced logit processor * nit * clean normalizer * remove protected * Add logit processors and update generation code & tests * Some tidy up * Update docstring * update * update based on review * Update src/transformers/models/whisper/configuration_whisper.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update src/transformers/models/whisper/configuration_whisper.py Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> * Update to reflect changes on the PT model branch * Tidy up * Remove extra whitespace * Fix test - make input ids small enough we can append * Include upstream changes on main * PR comments - add batch tests, remove comments & defaults * Fix model output imports * Update src/transformers/models/whisper/modeling_tf_whisper.py Co-authored-by: Joao Gante * Update src/transformers/generation_tf_logits_process.py Co-authored-by: Joao Gante * Update src/transformers/models/whisper/modeling_tf_whisper.py Co-authored-by: Joao Gante * Update src/transformers/models/whisper/modeling_tf_whisper.py Co-authored-by: Joao Gante * Update tests/models/whisper/test_modeling_tf_whisper.py Co-authored-by: Joao Gante * Update src/transformers/models/whisper/modeling_tf_whisper.py Co-authored-by: Joao Gante * Update src/transformers/models/whisper/modeling_tf_whisper.py Co-authored-by: Joao Gante * Update docstring example * Update src/transformers/models/whisper/modeling_tf_whisper.py Co-authored-by: Matt * Remove changes to adjust_logits_during_generation function * Update src/transformers/models/whisper/modeling_tf_whisper.py Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> * Tidy up imports that don't require TF * Update tests - skip and no more skip * Update tests/generation/test_generation_tf_logits_process.py Co-authored-by: Joao Gante * Update src/transformers/models/whisper/modeling_tf_whisper.py * Update src/transformers/models/whisper/modeling_tf_whisper.py Co-authored-by: Matt * Add training flags * Add (skipped) XLA generation tests * Add embedding correctness test * Add constant ids for generation tests * Make logits finding a bit tidier * Remove unused args * xla generation enabled * Don't skip XLA tests anymore * Fix tests - add position ids to expected signature and update rag generation * Undo method reorder * Remove added whitespace * Remove copy-paste gradient checkopint ref * Remove * Trigger CI - (issue with refs when pulling) Co-authored-by: Arthur Zucker Co-authored-by: Patrick von Platen Co-authored-by: NielsRogge Co-authored-by: Arthur <48595927+ArthurZucker@users.noreply.github.com> Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com> Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com> Co-authored-by: Joao Gante Co-authored-by: Matt Co-authored-by: Joao Gante --- README.md | 8 +- README_ko.md | 22 +- README_zh-hans.md | 20 +- README_zh-hant.md | 20 +- docs/source/en/index.mdx | 2 +- docs/source/en/model_doc/whisper.mdx | 15 +- src/transformers/__init__.py | 14 + .../generation_tf_logits_process.py | 81 + src/transformers/generation_tf_utils.py | 62 +- .../models/auto/modeling_tf_auto.py | 3 + .../models/rag/modeling_tf_rag.py | 1 + src/transformers/models/whisper/__init__.py | 27 +- .../whisper/feature_extraction_whisper.py | 14 - .../models/whisper/modeling_tf_whisper.py | 1401 +++++++++++++++++ src/transformers/utils/dummy_tf_objects.py | 24 + .../test_generation_tf_logits_process.py | 83 + .../whisper/test_modeling_tf_whisper.py | 983 ++++++++++++ tests/test_modeling_tf_common.py | 28 +- utils/check_repo.py | 2 + utils/documentation_tests.txt | 1 + 20 files changed, 2755 insertions(+), 56 deletions(-) create mode 100644 src/transformers/models/whisper/modeling_tf_whisper.py create mode 100644 tests/models/whisper/test_modeling_tf_whisper.py diff --git a/README.md b/README.md index d10436c9510357..4cdf062c188ea1 100644 --- a/README.md +++ b/README.md @@ -55,13 +55,13 @@ limitations under the License. -🤗 Transformers provides thousands of pretrained models to perform tasks on different modalities such as text, vision, and audio. +🤗 Transformers provides thousands of pretrained models to perform tasks on different modalities such as text, vision, and audio. These models can be applied on: -* 📝 Text, for tasks like text classification, information extraction, question answering, summarization, translation, text generation, in over 100 languages. -* 🖼️ Images, for tasks like image classification, object detection, and segmentation. -* 🗣️ Audio, for tasks like speech recognition and audio classification. +* 📝 Text, for tasks like text classification, information extraction, question answering, summarization, translation, text generation, in over 100 languages. +* 🖼️ Images, for tasks like image classification, object detection, and segmentation. +* 🗣️ Audio, for tasks like speech recognition and audio classification. Transformer models can also perform tasks on **several modalities combined**, such as table question answering, optical character recognition, information extraction from scanned documents, video classification, and visual question answering. diff --git a/README_ko.md b/README_ko.md index f16e459976087a..7d72b9f6a3ac92 100644 --- a/README_ko.md +++ b/README_ko.md @@ -59,7 +59,7 @@ limitations under the License. 🤗 Transformers는 이러한 사전학습 모델을 빠르게 다운로드해 특정 텍스트에 사용하고, 원하는 데이터로 fine-tuning해 커뮤니티나 우리의 [모델 허브](https://huggingface.co/models)에 공유할 수 있도록 API를 제공합니다. 또한, 모델 구조를 정의하는 각 파이썬 모듈은 완전히 독립적이여서 연구 실험을 위해 손쉽게 수정할 수 있습니다. -🤗 Transformers는 가장 유명한 3개의 딥러닝 라이브러리를 지원합니다. 이들은 서로 완벽히 연동됩니다 — [Jax](https://jax.readthedocs.io/en/latest/), [PyTorch](https://pytorch.org/), [TensorFlow](https://www.tensorflow.org/). 간단하게 이 라이브러리 중 하나로 모델을 학습하고, 또 다른 라이브러리로 추론을 위해 모델을 불러올 수 있습니다. +🤗 Transformers는 가장 유명한 3개의 딥러닝 라이브러리를 지원합니다. 이들은 서로 완벽히 연동됩니다 — [Jax](https://jax.readthedocs.io/en/latest/), [PyTorch](https://pytorch.org/), [TensorFlow](https://www.tensorflow.org/). 간단하게 이 라이브러리 중 하나로 모델을 학습하고, 또 다른 라이브러리로 추론을 위해 모델을 불러올 수 있습니다. ## 온라인 데모 @@ -74,7 +74,7 @@ limitations under the License. - [DistilBERT를 이용한 질문 답변](https://huggingface.co/distilbert-base-uncased-distilled-squad?text=Which+name+is+also+used+to+describe+the+Amazon+rainforest+in+English%3F&context=The+Amazon+rainforest+%28Portuguese%3A+Floresta+Amaz%C3%B4nica+or+Amaz%C3%B4nia%3B+Spanish%3A+Selva+Amaz%C3%B3nica%2C+Amazon%C3%ADa+or+usually+Amazonia%3B+French%3A+For%C3%AAt+amazonienne%3B+Dutch%3A+Amazoneregenwoud%29%2C+also+known+in+English+as+Amazonia+or+the+Amazon+Jungle%2C+is+a+moist+broadleaf+forest+that+covers+most+of+the+Amazon+basin+of+South+America.+This+basin+encompasses+7%2C000%2C000+square+kilometres+%282%2C700%2C000+sq+mi%29%2C+of+which+5%2C500%2C000+square+kilometres+%282%2C100%2C000+sq+mi%29+are+covered+by+the+rainforest.+This+region+includes+territory+belonging+to+nine+nations.+The+majority+of+the+forest+is+contained+within+Brazil%2C+with+60%25+of+the+rainforest%2C+followed+by+Peru+with+13%25%2C+Colombia+with+10%25%2C+and+with+minor+amounts+in+Venezuela%2C+Ecuador%2C+Bolivia%2C+Guyana%2C+Suriname+and+French+Guiana.+States+or+departments+in+four+nations+contain+%22Amazonas%22+in+their+names.+The+Amazon+represents+over+half+of+the+planet%27s+remaining+rainforests%2C+and+comprises+the+largest+and+most+biodiverse+tract+of+tropical+rainforest+in+the+world%2C+with+an+estimated+390+billion+individual+trees+divided+into+16%2C000+species) - [T5로 번역하기](https://huggingface.co/t5-base?text=My+name+is+Wolfgang+and+I+live+in+Berlin) -**[Transformer와 글쓰기](https://transformer.huggingface.co)** 는 이 저장소의 텍스트 생성 능력에 관한 Hugging Face 팀의 공식 데모입니다. +**[Transformer와 글쓰기](https://transformer.huggingface.co)** 는 이 저장소의 텍스트 생성 능력에 관한 Hugging Face 팀의 공식 데모입니다. ## Hugging Face 팀의 커스텀 지원을 원한다면 @@ -258,7 +258,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[GLPN](https://huggingface.co/docs/transformers/model_doc/glpn)** (from KAIST) released with the paper [Global-Local Path Networks for Monocular Depth Estimation with Vertical CutDepth](https://arxiv.org/abs/2201.07436) by Doyeon Kim, Woonghyun Ga, Pyungwhan Ahn, Donggyu Joo, Sehwan Chun, Junmo Kim. 1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever. 1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy. -1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach +1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach 1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/model_doc/gpt_neox_japanese)** (from ABEJA) released by Shinya Otani, Takayoshi Makabe, Anuj Arora, and Kyo Hattori. 1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**. 1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (from EleutherAI) released in the repository [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki. @@ -297,7 +297,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al. 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu. -1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, Peter J. Liu. +1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, Peter J. Liu. 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira. 1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen. 1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang. @@ -307,9 +307,9 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[RAG](https://huggingface.co/docs/transformers/model_doc/rag)** (from Facebook) released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](https://arxiv.org/abs/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Küttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, Sebastian Riedel, Douwe Kiela. 1. **[REALM](https://huggingface.co/docs/transformers/model_doc/realm.html)** (from Google Research) released with the paper [REALM: Retrieval-Augmented Language Model Pre-Training](https://arxiv.org/abs/2002.08909) by Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei Chang. 1. **[Reformer](https://huggingface.co/docs/transformers/model_doc/reformer)** (from Google Research) released with the paper [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya. -1. **[RegNet](https://huggingface.co/docs/transformers/model_doc/regnet)** (from META Research) released with the paper [Designing Network Design Space](https://arxiv.org/abs/2003.13678) by Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, Piotr Dollár. +1. **[RegNet](https://huggingface.co/docs/transformers/model_doc/regnet)** (from META Research) released with the paper [Designing Network Design Space](https://arxiv.org/abs/2003.13678) by Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, Piotr Dollár. 1. **[RemBERT](https://huggingface.co/docs/transformers/model_doc/rembert)** (from Google Research) released with the paper [Rethinking embedding coupling in pre-trained language models](https://arxiv.org/pdf/2010.12821.pdf) by Hyung Won Chung, Thibault Févry, Henry Tsai, M. Johnson, Sebastian Ruder. -1. **[ResNet](https://huggingface.co/docs/transformers/model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. +1. **[ResNet](https://huggingface.co/docs/transformers/model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. 1. **[RoBERTa](https://huggingface.co/docs/transformers/model_doc/roberta)** (from Facebook), released together with the paper a [Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov. 1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (from ZhuiyiTechnology), released together with the paper a [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/pdf/2104.09864v1.pdf) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu. 1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo. @@ -325,11 +325,11 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu. 1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos. 1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (from Microsoft Research) released with the paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou. -1. **[Time Series Transformer](https://huggingface.co/docs/transformers/main/model_doc/time_series_transformer)** (from HuggingFace). -1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine +1. **[Time Series Transformer](https://huggingface.co/docs/transformers/main/model_doc/time_series_transformer)** (from HuggingFace). +1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine 1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov. 1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei. -1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler +1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang. 1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu. 1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu. @@ -345,7 +345,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei. 1. **[Whisper](https://huggingface.co/docs/transformers/main/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever. 1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling. -1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li. +1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li. 1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau. 1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou. 1. **[XLM-RoBERTa](https://huggingface.co/docs/transformers/model_doc/xlm-roberta)** (from Facebook AI), released together with the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov. @@ -355,7 +355,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[XLSR-Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/xlsr_wav2vec2)** (from Facebook AI) released with the paper [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://arxiv.org/abs/2006.13979) by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli. 1. **[YOLOS](https://huggingface.co/docs/transformers/model_doc/yolos)** (from Huazhong University of Science & Technology) released with the paper [You Only Look at One Sequence: Rethinking Transformer in Vision through Object Detection](https://arxiv.org/abs/2106.00666) by Yuxin Fang, Bencheng Liao, Xinggang Wang, Jiemin Fang, Jiyang Qi, Rui Wu, Jianwei Niu, Wenyu Liu. 1. **[YOSO](https://huggingface.co/docs/transformers/model_doc/yoso)** (from the University of Wisconsin - Madison) released with the paper [You Only Sample (Almost) by Zhanpeng Zeng, Yunyang Xiong, Sathya N. Ravi, Shailesh Acharya, Glenn Fung, Vikas Singh. -1. 새로운 모델을 올리고 싶나요? 우리가 **상세한 가이드와 템플릿** 으로 새로운 모델을 올리도록 도와드릴게요. 가이드와 템플릿은 이 저장소의 [`templates`](./templates) 폴더에서 확인하실 수 있습니다. [컨트리뷰션 가이드라인](./CONTRIBUTING.md)을 꼭 확인해주시고, PR을 올리기 전에 메인테이너에게 연락하거나 이슈를 오픈해 피드백을 받으시길 바랍니다. +1. 새로운 모델을 올리고 싶나요? 우리가 **상세한 가이드와 템플릿** 으로 새로운 모델을 올리도록 도와드릴게요. 가이드와 템플릿은 이 저장소의 [`templates`](./templates) 폴더에서 확인하실 수 있습니다. [컨트리뷰션 가이드라인](./CONTRIBUTING.md)을 꼭 확인해주시고, PR을 올리기 전에 메인테이너에게 연락하거나 이슈를 오픈해 피드백을 받으시길 바랍니다. 각 모델이 Flax, PyTorch, TensorFlow으로 구현되었는지 또는 🤗 Tokenizers 라이브러리가 지원하는 토크나이저를 사용하는지 확인하려면, [이 표](https://huggingface.co/docs/transformers/index#supported-frameworks)를 확인하세요. diff --git a/README_zh-hans.md b/README_zh-hans.md index 5112456121b51a..474ba18d58319d 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -245,7 +245,7 @@ conda install -c huggingface transformers 1. **[BigBird-RoBERTa](https://huggingface.co/docs/transformers/model_doc/big_bird)** (来自 Google Research) 伴随论文 [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) 由 Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed 发布。 1. **[Blenderbot](https://huggingface.co/docs/transformers/model_doc/blenderbot)** (来自 Facebook) 伴随论文 [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) 由 Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston 发布。 1. **[BlenderbotSmall](https://huggingface.co/docs/transformers/model_doc/blenderbot-small)** (来自 Facebook) 伴随论文 [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) 由 Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston 发布。 -1. **[BLOOM](https://huggingface.co/docs/transformers/model_doc/bloom)** (from BigScience workshop) released by the [BigSicence Workshop](https://bigscience.huggingface.co/). +1. **[BLOOM](https://huggingface.co/docs/transformers/model_doc/bloom)** (from BigScience workshop) released by the [BigSicence Workshop](https://bigscience.huggingface.co/). 1. **[BORT](https://huggingface.co/docs/transformers/model_doc/bort)** (来自 Alexa) 伴随论文 [Optimal Subarchitecture Extraction For BERT](https://arxiv.org/abs/2010.10499) 由 Adrian de Wynter and Daniel J. Perry 发布。 1. **[ByT5](https://huggingface.co/docs/transformers/model_doc/byt5)** (来自 Google Research) 伴随论文 [ByT5: Towards a token-free future with pre-trained byte-to-byte models](https://arxiv.org/abs/2105.13626) 由 Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel 发布。 1. **[CamemBERT](https://huggingface.co/docs/transformers/model_doc/camembert)** (来自 Inria/Facebook/Sorbonne) 伴随论文 [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) 由 Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot 发布。 @@ -282,7 +282,7 @@ conda install -c huggingface transformers 1. **[GLPN](https://huggingface.co/docs/transformers/model_doc/glpn)** (来自 KAIST) 伴随论文 [Global-Local Path Networks for Monocular Depth Estimation with Vertical CutDepth](https://arxiv.org/abs/2201.07436) 由 Doyeon Kim, Woonghyun Ga, Pyungwhan Ahn, Donggyu Joo, Sehwan Chun, Junmo Kim 发布。 1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (来自 OpenAI) 伴随论文 [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) 由 Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever 发布。 1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (来自 EleutherAI) 随仓库 [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) 发布。作者为 Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy 发布。 -1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach +1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach 1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/model_doc/gpt_neox_japanese)** (来自 ABEJA) 由 Shinya Otani, Takayoshi Makabe, Anuj Arora, Kyo Hattori。 1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (来自 OpenAI) 伴随论文 [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) 由 Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever** 发布。 1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (来自 EleutherAI) 伴随论文 [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) 由 Ben Wang and Aran Komatsuzaki 发布。 @@ -314,14 +314,14 @@ conda install -c huggingface transformers 1. **[MobileViT](https://huggingface.co/docs/transformers/model_doc/mobilevit)** (来自 Apple) 伴随论文 [MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer](https://arxiv.org/abs/2110.02178) 由 Sachin Mehta and Mohammad Rastegari 发布。 1. **[MPNet](https://huggingface.co/docs/transformers/model_doc/mpnet)** (来自 Microsoft Research) 伴随论文 [MPNet: Masked and Permuted Pre-training for Language Understanding](https://arxiv.org/abs/2004.09297) 由 Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu 发布。 1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (来自 Google AI) 伴随论文 [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) 由 Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel 发布。 -1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (来自 中国人民大学 AI Box) 伴随论文 [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) 由 Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen 发布。 +1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (来自 中国人民大学 AI Box) 伴随论文 [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) 由 Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen 发布。 1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (来自华为诺亚方舟实验室) 伴随论文 [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) 由 Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu 发布。 1. **[NLLB](https://huggingface.co/docs/transformers/model_doc/nllb)** (来自 Meta) 伴随论文 [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) 由 the NLLB team 发布。 1. **[Nyströmformer](https://huggingface.co/docs/transformers/model_doc/nystromformer)** (来自 the University of Wisconsin - Madison) 伴随论文 [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) 由 Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh 发布。 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (来自 Meta AI) 伴随论文 [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) 由 Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al 发布。 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (来自 Google AI) 伴随论文 [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) 由 Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby 发布。 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (来自 Google) 伴随论文 [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) 由 Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu 发布。 -1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (来自 Google) 伴随论文 [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) 由 Jason Phang, Yao Zhao, Peter J. Liu 发布。 +1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (来自 Google) 伴随论文 [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) 由 Jason Phang, Yao Zhao, Peter J. Liu 发布。 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (来自 Deepmind) 伴随论文 [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) 由 Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira 发布。 1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (来自 VinAI Research) 伴随论文 [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) 由 Dat Quoc Nguyen and Anh Tuan Nguyen 发布。 1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (来自 UCLA NLP) 伴随论文 [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) 由 Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang 发布。 @@ -331,9 +331,9 @@ conda install -c huggingface transformers 1. **[RAG](https://huggingface.co/docs/transformers/model_doc/rag)** (来自 Facebook) 伴随论文 [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](https://arxiv.org/abs/2005.11401) 由 Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Küttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, Sebastian Riedel, Douwe Kiela 发布。 1. **[REALM](https://huggingface.co/docs/transformers/model_doc/realm.html)** (来自 Google Research) 伴随论文 [REALM: Retrieval-Augmented Language Model Pre-Training](https://arxiv.org/abs/2002.08909) 由 Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei Chang 发布。 1. **[Reformer](https://huggingface.co/docs/transformers/model_doc/reformer)** (来自 Google Research) 伴随论文 [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) 由 Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya 发布。 -1. **[RegNet](https://huggingface.co/docs/transformers/model_doc/regnet)** (from META Research) released with the paper [Designing Network Design Space](https://arxiv.org/abs/2003.13678) by Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, Piotr Dollár. +1. **[RegNet](https://huggingface.co/docs/transformers/model_doc/regnet)** (from META Research) released with the paper [Designing Network Design Space](https://arxiv.org/abs/2003.13678) by Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, Piotr Dollár. 1. **[RemBERT](https://huggingface.co/docs/transformers/model_doc/rembert)** (来自 Google Research) 伴随论文 [Rethinking embedding coupling in pre-trained language models](https://arxiv.org/pdf/2010.12821.pdf) 由 Hyung Won Chung, Thibault Févry, Henry Tsai, M. Johnson, Sebastian Ruder 发布。 -1. **[ResNet](https://huggingface.co/docs/transformers/model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. +1. **[ResNet](https://huggingface.co/docs/transformers/model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. 1. **[RoBERTa](https://huggingface.co/docs/transformers/model_doc/roberta)** (来自 Facebook), 伴随论文 [Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) 由 Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov 发布。 1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (来自 ZhuiyiTechnology), 伴随论文 [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/pdf/2104.09864v1.pdf) 由 Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu 发布。 1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (来自 NVIDIA) 伴随论文 [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) 由 Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo 发布。 @@ -349,11 +349,11 @@ conda install -c huggingface transformers 1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (来自 Google AI) 伴随论文 [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) 由 Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu 发布。 1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (来自 Google AI) 伴随论文 [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) 由 Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos 发布。 1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (来自 Microsoft Research) 伴随论文 [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) 由 Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou 发布。 -1. **[Time Series Transformer](https://huggingface.co/docs/transformers/main/model_doc/time_series_transformer)** (from HuggingFace). -1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine +1. **[Time Series Transformer](https://huggingface.co/docs/transformers/main/model_doc/time_series_transformer)** (from HuggingFace). +1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine 1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (来自 Google/CMU) 伴随论文 [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) 由 Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov 发布。 1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (来自 Microsoft) 伴随论文 [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) 由 Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei 发布。 -1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler +1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (来自 Microsoft Research) 伴随论文 [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) 由 Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang 发布。 1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (来自 Microsoft Research) 伴随论文 [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) 由 Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu 发布。 1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (来自 Tsinghua University and Nankai University) 伴随论文 [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) 由 Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu 发布。 @@ -369,7 +369,7 @@ conda install -c huggingface transformers 1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei. 1. **[Whisper](https://huggingface.co/docs/transformers/main/model_doc/whisper)** (来自 OpenAI) 伴随论文 [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) 由 Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever 发布。 1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (来自 Microsoft Research) 伴随论文 [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) 由 Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling 发布。 -1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li. +1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li. 1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (来自 Facebook) 伴随论文 [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) 由 Guillaume Lample and Alexis Conneau 发布。 1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (来自 Microsoft Research) 伴随论文 [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) 由 Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou 发布。 1. **[XLM-RoBERTa](https://huggingface.co/docs/transformers/model_doc/xlm-roberta)** (来自 Facebook AI), 伴随论文 [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) 由 Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index 7f0cd19e1868f2..ebc75e23adb6fb 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -257,7 +257,7 @@ conda install -c huggingface transformers 1. **[BigBird-RoBERTa](https://huggingface.co/docs/transformers/model_doc/big_bird)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed. 1. **[Blenderbot](https://huggingface.co/docs/transformers/model_doc/blenderbot)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston. 1. **[BlenderbotSmall](https://huggingface.co/docs/transformers/model_doc/blenderbot-small)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston. -1. **[BLOOM](https://huggingface.co/docs/transformers/model_doc/bloom)** (from BigScience workshop) released by the [BigSicence Workshop](https://bigscience.huggingface.co/). +1. **[BLOOM](https://huggingface.co/docs/transformers/model_doc/bloom)** (from BigScience workshop) released by the [BigSicence Workshop](https://bigscience.huggingface.co/). 1. **[BORT](https://huggingface.co/docs/transformers/model_doc/bort)** (from Alexa) released with the paper [Optimal Subarchitecture Extraction For BERT](https://arxiv.org/abs/2010.10499) by Adrian de Wynter and Daniel J. Perry. 1. **[ByT5](https://huggingface.co/docs/transformers/model_doc/byt5)** (from Google Research) released with the paper [ByT5: Towards a token-free future with pre-trained byte-to-byte models](https://arxiv.org/abs/2105.13626) by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel. 1. **[CamemBERT](https://huggingface.co/docs/transformers/model_doc/camembert)** (from Inria/Facebook/Sorbonne) released with the paper [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot. @@ -294,7 +294,7 @@ conda install -c huggingface transformers 1. **[GLPN](https://huggingface.co/docs/transformers/model_doc/glpn)** (from KAIST) released with the paper [Global-Local Path Networks for Monocular Depth Estimation with Vertical CutDepth](https://arxiv.org/abs/2201.07436) by Doyeon Kim, Woonghyun Ga, Pyungwhan Ahn, Donggyu Joo, Sehwan Chun, Junmo Kim. 1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever. 1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy. -1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach +1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach 1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/model_doc/gpt_neox_japanese)** (from ABEJA) released by Shinya Otani, Takayoshi Makabe, Anuj Arora, and Kyo Hattori. 1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**. 1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (from EleutherAI) released with the paper [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki. @@ -316,7 +316,7 @@ conda install -c huggingface transformers 1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin. 1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team. 1. **[MarkupLM](https://huggingface.co/docs/transformers/main/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei. -1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov +1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov 1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer. 1. **[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan. 1. **[Megatron-BERT](https://huggingface.co/docs/transformers/model_doc/megatron-bert)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro. @@ -343,9 +343,9 @@ conda install -c huggingface transformers 1. **[RAG](https://huggingface.co/docs/transformers/model_doc/rag)** (from Facebook) released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](https://arxiv.org/abs/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Küttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, Sebastian Riedel, Douwe Kiela. 1. **[REALM](https://huggingface.co/docs/transformers/model_doc/realm.html)** (from Google Research) released with the paper [REALM: Retrieval-Augmented Language Model Pre-Training](https://arxiv.org/abs/2002.08909) by Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei Chang. 1. **[Reformer](https://huggingface.co/docs/transformers/model_doc/reformer)** (from Google Research) released with the paper [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya. -1. **[RegNet](https://huggingface.co/docs/transformers/model_doc/regnet)** (from META Research) released with the paper [Designing Network Design Space](https://arxiv.org/abs/2003.13678) by Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, Piotr Dollár. +1. **[RegNet](https://huggingface.co/docs/transformers/model_doc/regnet)** (from META Research) released with the paper [Designing Network Design Space](https://arxiv.org/abs/2003.13678) by Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, Piotr Dollár. 1. **[RemBERT](https://huggingface.co/docs/transformers/model_doc/rembert)** (from Google Research) released with the paper [Rethinking embedding coupling in pre-trained language models](https://arxiv.org/pdf/2010.12821.pdf) by Hyung Won Chung, Thibault Févry, Henry Tsai, M. Johnson, Sebastian Ruder. -1. **[ResNet](https://huggingface.co/docs/transformers/model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. +1. **[ResNet](https://huggingface.co/docs/transformers/model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. 1. **[RoBERTa](https://huggingface.co/docs/transformers/model_doc/roberta)** (from Facebook), released together with the paper a [Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov. 1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (from ZhuiyiTechnology), released together with the paper a [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/pdf/2104.09864v1.pdf) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu. 1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo. @@ -361,17 +361,17 @@ conda install -c huggingface transformers 1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released with the paper [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu. 1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos. 1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (from Microsoft Research) released with the paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou. -1. **[Time Series Transformer](https://huggingface.co/docs/transformers/main/model_doc/time_series_transformer)** (from HuggingFace). -1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine +1. **[Time Series Transformer](https://huggingface.co/docs/transformers/main/model_doc/time_series_transformer)** (from HuggingFace). +1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine 1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov. 1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft) released with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei. -1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler +1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang. 1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu. 1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu. 1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang. 1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim. -1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby. +1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby. 1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang. 1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick. 1. **[ViTMSN](https://huggingface.co/docs/transformers/main/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas. @@ -381,7 +381,7 @@ conda install -c huggingface transformers 1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei. 1. **[Whisper](https://huggingface.co/docs/transformers/main/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever. 1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling. -1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li. +1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li. 1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau. 1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou. 1. **[XLM-RoBERTa](https://huggingface.co/docs/transformers/model_doc/xlm-roberta)** (from Facebook AI), released together with the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov. diff --git a/docs/source/en/index.mdx b/docs/source/en/index.mdx index 2b6a482aee7534..db2f4c843f32e3 100644 --- a/docs/source/en/index.mdx +++ b/docs/source/en/index.mdx @@ -330,7 +330,7 @@ Flax), PyTorch, and/or TensorFlow. | Wav2Vec2 | ✅ | ❌ | ✅ | ✅ | ✅ | | Wav2Vec2-Conformer | ❌ | ❌ | ✅ | ❌ | ❌ | | WavLM | ❌ | ❌ | ✅ | ❌ | ❌ | -| Whisper | ✅ | ❌ | ✅ | ❌ | ❌ | +| Whisper | ✅ | ❌ | ✅ | ✅ | ❌ | | X-CLIP | ❌ | ❌ | ✅ | ❌ | ❌ | | XGLM | ✅ | ✅ | ✅ | ✅ | ✅ | | XLM | ✅ | ❌ | ✅ | ✅ | ❌ | diff --git a/docs/source/en/model_doc/whisper.mdx b/docs/source/en/model_doc/whisper.mdx index beb7bf3798bcb6..40485337cd273f 100644 --- a/docs/source/en/model_doc/whisper.mdx +++ b/docs/source/en/model_doc/whisper.mdx @@ -23,11 +23,11 @@ The abstract from the paper is the following: Tips: -- The model usually performs well without requiring any finetuning. +- The model usually performs well without requiring any finetuning. - The architecture follows a classic encoder-decoder architecture, which means that it relies on the [`~generation_utils.GenerationMixin.generate`] function for inference. - One can use [`WhisperProcessor`] to prepare audio for the model, and decode the predicted ID's back into text. -This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ). +This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ). The Tensorflow version of this model was contributed by [amyeroberts](https://huggingface.co/amyeroberts). The original code can be found [here](https://github.com/openai/whisper). @@ -66,3 +66,14 @@ The original code can be found [here](https://github.com/openai/whisper). [[autodoc]] WhisperForConditionalGeneration - forward + + +## TFWhisperModel + +[[autodoc]] TFWhisperModel + - call + +## TFWhisperForConditionalGeneration + +[[autodoc]] TFWhisperForConditionalGeneration + - call diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index 0fd95663a57a70..b634ffea72b119 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -2754,6 +2754,14 @@ "TFWav2Vec2PreTrainedModel", ] ) + _import_structure["models.whisper"].extend( + [ + "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFWhisperForConditionalGeneration", + "TFWhisperModel", + "TFWhisperPreTrainedModel", + ] + ) _import_structure["models.xglm"].extend( [ "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST", @@ -5303,6 +5311,12 @@ TFWav2Vec2Model, TFWav2Vec2PreTrainedModel, ) + from .models.whisper import ( + TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, + TFWhisperForConditionalGeneration, + TFWhisperModel, + TFWhisperPreTrainedModel, + ) from .models.xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, diff --git a/src/transformers/generation_tf_logits_process.py b/src/transformers/generation_tf_logits_process.py index b09330e10b34d4..25e287d3875a15 100644 --- a/src/transformers/generation_tf_logits_process.py +++ b/src/transformers/generation_tf_logits_process.py @@ -504,3 +504,84 @@ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf. axis=-1, ) return scores + + +class TFSuppressTokensAtBeginLogitsProcessor(TFLogitsProcessor): + r""" + [`TFSuppressTokensAtBeginLogitsProcessor`] suppresses a list of tokens as soon as the `generate` function starts + generating using `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` at not + sampled at the begining of the generation. + """ + + def __init__(self, begin_suppress_tokens, begin_index): + self.begin_suppress_tokens = list(begin_suppress_tokens) + self.begin_index = begin_index + + def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: + scores = tf.cond( + tf.equal(cur_len, self.begin_index), + lambda: tf.tensor_scatter_nd_update( + scores, + indices=[[i, token] for i in range(scores.shape[0]) for token in self.begin_suppress_tokens], + updates=[-float("inf") for _ in range(scores.shape[0] * len(self.begin_suppress_tokens))], + ), + lambda: scores, + ) + return scores + + +class TFSuppressTokensLogitsProcessor(TFLogitsProcessor): + r"""This processor can be used to suppress a list of tokens. The processor will set their log probs to `-inf` so that they + are not sampled.""" + + def __init__(self, suppress_tokens): + self.suppress_tokens = list(suppress_tokens) + + def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: + scores = tf.tensor_scatter_nd_update( + scores, + indices=[[i, token] for i in range(scores.shape[0]) for token in self.suppress_tokens], + updates=[-float("inf") for _ in range(scores.shape[0] * len(self.suppress_tokens))], + ) + return scores + + +class TFForceTokensLogitsProcessor(TFLogitsProcessor): + r"""This processor can be used to force a list of tokens. The processor will set their log probs to `0` and all + other tokens to `-inf` so that they are sampled at their corresponding index.""" + + def __init__(self, force_token_map): + force_token_map = dict(force_token_map) + # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the + # index of the array corresponds to the index of the token to be forced, for XLA compatibility. + # Indexes without forced tokens will have an negative value. + force_token_array = np.ones((max(force_token_map.keys()) + 1), dtype=np.int32) * -1 + for index, token in force_token_map.items(): + force_token_array[index] = token + self.force_token_array = tf.convert_to_tensor(force_token_array, dtype=tf.int32) + + def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: + def _force_token(generation_idx): + batch_size = scores.shape[0] + current_token = self.force_token_array[generation_idx] + + new_scores = tf.ones_like(scores, dtype=scores.dtype) * -float("inf") + indices = tf.stack((tf.range(batch_size), tf.tile([current_token], [batch_size])), axis=1) + updates = tf.zeros((batch_size,), dtype=scores.dtype) + new_scores = tf.tensor_scatter_nd_update(new_scores, indices, updates) + return new_scores + + scores = tf.cond( + tf.greater_equal(cur_len, tf.shape(self.force_token_array)[0]), + # If the current length is geq than the length of force_token_array, the processor does nothing. + lambda: tf.identity(scores), + # Otherwise, it may force a certain token. + lambda: tf.cond( + tf.greater_equal(self.force_token_array[cur_len], 0), + # Only valid (positive) tokens are forced + lambda: _force_token(cur_len), + # Otherwise, the processor does nothing. + lambda: scores, + ), + ) + return scores diff --git a/src/transformers/generation_tf_utils.py b/src/transformers/generation_tf_utils.py index beff677136aa02..14d4d0072e4d49 100644 --- a/src/transformers/generation_tf_utils.py +++ b/src/transformers/generation_tf_utils.py @@ -26,11 +26,14 @@ from .generation_tf_logits_process import ( TFForcedBOSTokenLogitsProcessor, TFForcedEOSTokenLogitsProcessor, + TFForceTokensLogitsProcessor, TFLogitsProcessorList, TFMinLengthLogitsProcessor, TFNoBadWordsLogitsProcessor, TFNoRepeatNGramLogitsProcessor, TFRepetitionPenaltyLogitsProcessor, + TFSuppressTokensAtBeginLogitsProcessor, + TFSuppressTokensLogitsProcessor, TFTemperatureLogitsWarper, TFTopKLogitsWarper, TFTopPLogitsWarper, @@ -401,6 +404,9 @@ def generate( return_dict_in_generate=None, forced_bos_token_id=None, forced_eos_token_id=None, + suppress_tokens: Optional[List[int]] = None, + begin_suppress_tokens: Optional[List[int]] = None, + forced_decoder_ids: Optional[List[int]] = None, **model_kwargs, ) -> Union[TFGreedySearchOutput, TFSampleOutput, TFBeamSearchOutput, TFBeamSampleOutput, tf.Tensor]: r""" @@ -494,6 +500,14 @@ def generate( the target language token. forced_eos_token_id (`int`, *optional*): The id of the token to force as the last generated token when `max_length` is reached. + suppress_tokens (`List[int]`, *optional*, defaults to `model.config.suppress_tokens`): + A list of tokens that will be supressed at generation. The `SupressTokens` logit processor will set + their log probs to `-inf` so that they are not sampled. + begin_suppress_tokens (`List[int]`, *optional*, defaults to `model.config.begin_suppress_tokens`): + A list of tokens that will be supressed at the begining of the generation. The `SupressBeginTokens` + logit processor will set their log probs to `-inf` so that they are not sampled. + forced_decoder_ids (`List[int]`, *optional*, defaults to `model.config.forced_decoder_ids`): + A list of tokens that will be forced as beginning tokens, before sampling. model_specific_kwargs: Additional model specific kwargs will be forwarded to the `forward` function of the model. @@ -609,6 +623,9 @@ def generate( return_dict_in_generate=return_dict_in_generate, forced_bos_token_id=forced_bos_token_id, forced_eos_token_id=forced_eos_token_id, + suppress_tokens=suppress_tokens, + begin_suppress_tokens=begin_suppress_tokens, + forced_decoder_ids=forced_decoder_ids, **model_kwargs, ) @@ -648,6 +665,12 @@ def generate( forced_eos_token_id = ( forced_eos_token_id if forced_eos_token_id is not None else self.config.forced_eos_token_id ) + suppress_tokens = suppress_tokens if suppress_tokens is not None else self.config.suppress_tokens + begin_suppress_tokens = ( + begin_suppress_tokens if begin_suppress_tokens is not None else self.config.begin_suppress_tokens + ) + if forced_decoder_ids is None and hasattr(self.config, "forced_decoder_ids"): + forced_decoder_ids = self.config.forced_decoder_ids output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions @@ -1368,6 +1391,9 @@ def _generate( return_dict_in_generate=None, forced_bos_token_id=None, forced_eos_token_id=None, + suppress_tokens=None, + begin_suppress_tokens=None, + forced_decoder_ids=None, **model_kwargs, ) -> Union[TFGreedySearchOutput, TFSampleOutput, TFBeamSearchOutput, TFBeamSampleOutput, tf.Tensor]: r""" @@ -1461,6 +1487,15 @@ def _generate( the target language token. forced_eos_token_id (`int`, *optional*): The id of the token to force as the last generated token when `max_length` is reached. + suppress_tokens (`List[int]`, *optional*, defaults to `model.config.suppress_tokens`): + A list of tokens that will be supressed at generation. The `SupressTokens` logit processor will set + their log probs to `-inf` so that they are not sampled. + begin_suppress_tokens (`List[int]`, *optional*, defaults to `model.config.begin_suppress_tokens`): + A list of tokens that will be supressed at the begining of the generation. The `SupressBeginTokens` + logit processor will set their log probs to `-inf` so that they are not sampled. + forced_decoder_ids (`List[int]`, *optional*, defaults to `model.config.forced_decoder_ids`): + A list of tokens that will be forced as beginning tokens. + model_kwargs: Additional model specific kwargs will be forwarded to the `call` function of the model. @@ -1695,12 +1730,16 @@ def _generate( logits_processor = self._get_logits_processor( repetition_penalty=repetition_penalty, no_repeat_ngram_size=no_repeat_ngram_size, + input_ids_seq_length=input_ids_seq_length, bad_words_ids=bad_words_ids, min_length=min_length, max_length=max_length, eos_token_id=eos_token_id, forced_bos_token_id=forced_bos_token_id, forced_eos_token_id=forced_eos_token_id, + suppress_tokens=suppress_tokens, + begin_suppress_tokens=begin_suppress_tokens, + forced_decoder_ids=forced_decoder_ids, ) # 9. go into different generation modes @@ -1994,7 +2033,7 @@ def _update_attention(model_kwargs, new_past_index, is_encoder_decoder): def _initialize_past(past, num_padding_values, batch_axis): """initialize past with zeros -- the structure depends on `batch_axis`""" if batch_axis == 0: - padding_values = tf.scatter_nd(indices=[[2, 1]], updates=[num_padding_values], shape=(4, 2)) + padding_values = tf.constant([[0, 0], [0, 0], [0, num_padding_values], [0, 0]], dtype=tf.int32) new_past = () for past_layer in past: new_past_layer = list(past_layer) @@ -2099,12 +2138,16 @@ def _get_logits_processor( self, repetition_penalty: float, no_repeat_ngram_size: int, + input_ids_seq_length: int, bad_words_ids: List[List[int]], min_length: int, max_length: int, eos_token_id: int, forced_bos_token_id: int, forced_eos_token_id: int, + suppress_tokens: Optional[List[int]] = None, + begin_suppress_tokens: Optional[List[int]] = None, + forced_decoder_ids: Optional[List[int]] = None, ) -> TFLogitsProcessorList: """ This class returns a [`TFLogitsProcessorList`] list object that contains all relevant [`TFLogitsProcessor`] @@ -2118,6 +2161,12 @@ def _get_logits_processor( ) bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id + suppress_tokens = suppress_tokens if suppress_tokens is not None else self.config.suppress_tokens + begin_suppress_tokens = ( + begin_suppress_tokens if begin_suppress_tokens is not None else self.config.begin_suppress_tokens + ) + if forced_decoder_ids is None and hasattr(self.config, "forced_decoder_ids"): + forced_decoder_ids = self.config.forced_decoder_ids # instantiate processors list if repetition_penalty is not None and repetition_penalty != 1.0: @@ -2132,7 +2181,16 @@ def _get_logits_processor( processors.append(TFForcedBOSTokenLogitsProcessor(forced_bos_token_id)) if forced_eos_token_id is not None: processors.append(TFForcedEOSTokenLogitsProcessor(max_length, forced_eos_token_id)) - + if suppress_tokens is not None: + processors.append(TFSuppressTokensLogitsProcessor(suppress_tokens)) + if begin_suppress_tokens is not None: + begin_index = input_ids_seq_length + begin_index = begin_index if (input_ids_seq_length > 1 or forced_bos_token_id is None) else begin_index + 1 + if forced_decoder_ids is not None: + begin_index += forced_decoder_ids[-1][0] # generation starts after the last token that is forced + processors.append(TFSuppressTokensAtBeginLogitsProcessor(begin_suppress_tokens, begin_index)) + if forced_decoder_ids is not None: + processors.append(TFForceTokensLogitsProcessor(forced_decoder_ids)) return processors def greedy_search( diff --git a/src/transformers/models/auto/modeling_tf_auto.py b/src/transformers/models/auto/modeling_tf_auto.py index e13a0754b69261..db462aa621864a 100644 --- a/src/transformers/models/auto/modeling_tf_auto.py +++ b/src/transformers/models/auto/modeling_tf_auto.py @@ -80,6 +80,7 @@ ("vit", "TFViTModel"), ("vit_mae", "TFViTMAEModel"), ("wav2vec2", "TFWav2Vec2Model"), + ("whisper", "TFWhisperModel"), ("xglm", "TFXGLMModel"), ("xlm", "TFXLMModel"), ("xlm-roberta", "TFXLMRobertaModel"), @@ -145,6 +146,7 @@ ("t5", "TFT5ForConditionalGeneration"), ("tapas", "TFTapasForMaskedLM"), ("transfo-xl", "TFTransfoXLLMHeadModel"), + ("whisper", "TFWhisperForConditionalGeneration"), ("xlm", "TFXLMWithLMHeadModel"), ("xlm-roberta", "TFXLMRobertaForMaskedLM"), ("xlnet", "TFXLNetLMHeadModel"), @@ -253,6 +255,7 @@ TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict( [ ("speech_to_text", "TFSpeech2TextForConditionalGeneration"), + ("whisper", "TFWhisperForConditionalGeneration"), ] ) diff --git a/src/transformers/models/rag/modeling_tf_rag.py b/src/transformers/models/rag/modeling_tf_rag.py index cd09a83d9f0437..8e9a7f32cabfa9 100644 --- a/src/transformers/models/rag/modeling_tf_rag.py +++ b/src/transformers/models/rag/modeling_tf_rag.py @@ -1262,6 +1262,7 @@ def extend_enc_output(tensor, num_beams=None): eos_token_id=eos_token_id, forced_bos_token_id=None, forced_eos_token_id=None, + input_ids_seq_length=tf.shape(decoder_input_ids)[-1], ) model_kwargs["attention_mask"] = context_attention_mask diff --git a/src/transformers/models/whisper/__init__.py b/src/transformers/models/whisper/__init__.py index ea7259cf69c411..71e354a9361600 100644 --- a/src/transformers/models/whisper/__init__.py +++ b/src/transformers/models/whisper/__init__.py @@ -17,7 +17,7 @@ # limitations under the License. from typing import TYPE_CHECKING -from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _import_structure = { @@ -41,6 +41,18 @@ "WhisperPreTrainedModel", ] +try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_tf_whisper"] = [ + "TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFWhisperForConditionalGeneration", + "TFWhisperModel", + "TFWhisperPreTrainedModel", + ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig @@ -61,6 +73,19 @@ WhisperPreTrainedModel, ) + try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_tf_whisper import ( + TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, + TFWhisperForConditionalGeneration, + TFWhisperModel, + TFWhisperPreTrainedModel, + ) + else: import sys diff --git a/src/transformers/models/whisper/feature_extraction_whisper.py b/src/transformers/models/whisper/feature_extraction_whisper.py index 0d6bbd9ed18bbf..dda53dffaafd0a 100644 --- a/src/transformers/models/whisper/feature_extraction_whisper.py +++ b/src/transformers/models/whisper/feature_extraction_whisper.py @@ -218,7 +218,6 @@ def __call__( return_attention_mask: Optional[bool] = None, padding: Optional[str] = "max_length", max_length: Optional[int] = None, - sampling_rate: Optional[int] = None, **kwargs ) -> BatchFeature: """ @@ -262,19 +261,6 @@ def __call__( The value that is used to fill the padding values / vectors. """ - if sampling_rate is not None: - if sampling_rate != self.sampling_rate: - raise ValueError( - f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of" - f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with" - f" {self.sampling_rate} and not {sampling_rate}." - ) - else: - logger.warning( - "It is strongly recommended to pass the `sampling_rate` argument to this function. " - "Failing to do so can result in silent errors that might be hard to debug." - ) - is_batched = bool( isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], np.ndarray) or isinstance(raw_speech[0], (tuple, list))) diff --git a/src/transformers/models/whisper/modeling_tf_whisper.py b/src/transformers/models/whisper/modeling_tf_whisper.py new file mode 100644 index 00000000000000..eb6cadac41273b --- /dev/null +++ b/src/transformers/models/whisper/modeling_tf_whisper.py @@ -0,0 +1,1401 @@ +# coding=utf-8 +# Copyright 2022 The OpenAI Authors and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" TensorFlow Whisper model.""" + + +import math +import random +from typing import Dict, Optional, Tuple + +import tensorflow as tf + +from ...activations_tf import get_tf_activation +from ...modeling_tf_outputs import ( + TFBaseModelOutput, + TFBaseModelOutputWithPastAndCrossAttentions, + TFSeq2SeqLMOutput, + TFSeq2SeqModelOutput, +) +from ...modeling_tf_utils import TFCausalLanguageModelingLoss, TFPreTrainedModel, keras_serializable, unpack_inputs +from ...tf_utils import shape_list, stable_softmax +from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings +from .configuration_whisper import WhisperConfig + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "WhisperConfig" + + +TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "openai/whisper-base", + # See all Whisper models at https://huggingface.co/models?filter=whisper +] + +LARGE_NEGATIVE = -1e8 + + +# Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right +def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int): + pad_token_id = tf.cast(pad_token_id, input_ids.dtype) + decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype) + start_tokens = tf.fill( + (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype) + ) + shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1) + # replace possible -100 values in labels by `pad_token_id` + shifted_input_ids = tf.where( + shifted_input_ids == -100, + tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)), + shifted_input_ids, + ) + + # "Verify that `labels` has only positive values and -100" + assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype)) + + # Make sure the assertion op is called by wrapping the result in an identity no-op + with tf.control_dependencies([assert_gte0]): + shifted_input_ids = tf.identity(shifted_input_ids) + + return shifted_input_ids + + +# Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask +def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0): + """ + Make causal mask used for bi-directional self-attention. + """ + bsz = input_ids_shape[0] + tgt_len = input_ids_shape[1] + mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE + mask_cond = tf.range(shape_list(mask)[-1]) + + mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask) + + if past_key_values_length > 0: + mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1) + + return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1)) + + +# Copied from transformers.models.bart.modeling_tf_bart._expand_mask +def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. + """ + src_len = shape_list(mask)[1] + tgt_len = tgt_len if tgt_len is not None else src_len + one_cst = tf.constant(1.0) + mask = tf.cast(mask, dtype=one_cst.dtype) + expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) + + return (one_cst - expanded_mask) * LARGE_NEGATIVE + + +class TFWhisperPositionalEmbedding(tf.keras.layers.Layer): + def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None, **kwargs): + super().__init__(**kwargs) + self.num_positions = num_positions + self.embedding_dim = embedding_dim + self.padding_idx = padding_idx + + def build(self, input_shape): + self.weight = self.add_weight( + name="weight", + shape=[self.num_positions, self.embedding_dim], + trainable=True, + ) + super().build(input_shape) + + def call(self, input_ids, past_key_values_length=0): + past_key_values_length = tf.cast(past_key_values_length, tf.int32) + gather_indices = tf.range(tf.shape(input_ids)[-1], delta=1) + past_key_values_length + return tf.gather(self.weight, gather_indices) + + +class TFWhisperAttention(tf.keras.layers.Layer): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + is_decoder: bool = False, + bias: bool = True, + **kwargs + ): + super().__init__(**kwargs) + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = tf.keras.layers.Dropout(dropout) + self.head_dim = embed_dim // num_heads + + if (self.head_dim * num_heads) != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" + f" and `num_heads`: {num_heads})." + ) + self.scaling = self.head_dim**-0.5 + self.is_decoder = is_decoder + + self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=False, name="k_proj") + self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj") + self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj") + self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj") + + # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention._shape with BART->whisper + def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): + return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3)) + + # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention.call with BART->whisper + def call( + self, + hidden_states: tf.Tensor, + key_value_states: Optional[tf.Tensor] = None, + past_key_value: Optional[Tuple[Tuple[tf.Tensor]]] = None, + attention_mask: Optional[tf.Tensor] = None, + layer_head_mask: Optional[tf.Tensor] = None, + training: Optional[bool] = False, + ) -> Tuple[tf.Tensor, Optional[tf.Tensor]]: + """Input shape: Batch x Time x Channel""" + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + bsz, tgt_len, embed_dim = shape_list(hidden_states) + + # get query proj + query_states = self.q_proj(hidden_states) * self.scaling + # get key, value proj + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_states = past_key_value[0] + value_states = past_key_value[1] + elif is_cross_attention: + # cross_attentions + key_states = self._shape(self.k_proj(key_value_states), -1, bsz) + value_states = self._shape(self.v_proj(key_value_states), -1, bsz) + elif past_key_value is not None: + # reuse k, v, self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + key_states = tf.concat([past_key_value[0], key_states], axis=2) + value_states = tf.concat([past_key_value[1], value_states], axis=2) + else: + # self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + if self.is_decoder: + # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states, value_states) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) + key_states = tf.reshape(key_states, proj_shape) + value_states = tf.reshape(value_states, proj_shape) + + src_len = shape_list(key_states)[1] + attn_weights = tf.matmul(query_states, key_states, transpose_b=True) + + tf.debugging.assert_equal( + shape_list(attn_weights), + [bsz * self.num_heads, tgt_len, src_len], + message=( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {shape_list(attn_weights)}" + ), + ) + + if attention_mask is not None: + tf.debugging.assert_equal( + shape_list(attention_mask), + [bsz, 1, tgt_len, src_len], + message=( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" + f" {shape_list(attention_mask)}" + ), + ) + + attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) + attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask + attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) + + attn_weights = stable_softmax(attn_weights, axis=-1) + + if layer_head_mask is not None: + tf.debugging.assert_equal( + shape_list(layer_head_mask), + [self.num_heads], + message=( + f"Head mask for a single layer should be of size {(self.num_heads)}, but is" + f" {shape_list(layer_head_mask)}" + ), + ) + + attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( + attn_weights, (bsz, self.num_heads, tgt_len, src_len) + ) + attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) + + attn_probs = self.dropout(attn_weights, training=training) + attn_output = tf.matmul(attn_probs, value_states) + + tf.debugging.assert_equal( + shape_list(attn_output), + [bsz * self.num_heads, tgt_len, self.head_dim], + message=( + f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" + f" {shape_list(attn_output)}" + ), + ) + + attn_output = tf.transpose( + tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) + ) + attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) + + attn_output = self.out_proj(attn_output) + attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + + return attn_output, attn_weights, past_key_value + + +# Copied from transformers.models.speech_to_text.modeling_tf_speech_to_text.TFSpeech2TextEncoderLayer with Speech2Text->Whisper +class TFWhisperEncoderLayer(tf.keras.layers.Layer): + def __init__(self, config: WhisperConfig, **kwargs): + super().__init__(**kwargs) + self.embed_dim = config.d_model + self.self_attn = TFWhisperAttention( + self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn" + ) + self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") + self.dropout = tf.keras.layers.Dropout(config.dropout) + self.activation_fn = get_tf_activation(config.activation_function) + self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout) + self.fc1 = tf.keras.layers.Dense(config.encoder_ffn_dim, name="fc1") + self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2") + self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") + + def call( + self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training: bool = False + ): + """ + Args: + hidden_states (`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)` + attention_mask (`tf.Tensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size + `(encoder_attention_heads,)` + """ + residual = hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + hidden_states, self_attn_weights, _ = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + training=training, + ) + + tf.debugging.assert_equal( + shape_list(hidden_states), + shape_list(residual), + message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", + ) + + hidden_states = self.dropout(hidden_states, training=training) + hidden_states = residual + hidden_states + + residual = hidden_states + hidden_states = self.final_layer_norm(hidden_states) + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = self.activation_dropout(hidden_states, training=training) + hidden_states = self.fc2(hidden_states) + hidden_states = self.dropout(hidden_states, training=training) + hidden_states = residual + hidden_states + + return hidden_states, self_attn_weights + + +# Copied from transformers.models.speech_to_text.modeling_tf_speech_to_text.TFSpeech2TextDecoderLayer with Speech2Text->Whisper +class TFWhisperDecoderLayer(tf.keras.layers.Layer): + def __init__(self, config: WhisperConfig, **kwargs): + super().__init__(**kwargs) + self.embed_dim = config.d_model + + self.self_attn = TFWhisperAttention( + embed_dim=self.embed_dim, + num_heads=config.decoder_attention_heads, + dropout=config.attention_dropout, + name="self_attn", + is_decoder=True, + ) + self.dropout = tf.keras.layers.Dropout(config.dropout) + self.activation_fn = get_tf_activation(config.activation_function) + self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout) + + self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") + self.encoder_attn = TFWhisperAttention( + self.embed_dim, + config.decoder_attention_heads, + dropout=config.attention_dropout, + name="encoder_attn", + is_decoder=True, + ) + self.encoder_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm") + self.fc1 = tf.keras.layers.Dense(config.decoder_ffn_dim, name="fc1") + self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2") + self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") + + def call( + self, + hidden_states, + attention_mask: Optional[tf.Tensor] = None, + encoder_hidden_states: Optional[tf.Tensor] = None, + encoder_attention_mask: Optional[tf.Tensor] = None, + layer_head_mask: Optional[tf.Tensor] = None, + cross_attn_layer_head_mask: Optional[tf.Tensor] = None, + past_key_value: Optional[Tuple[tf.Tensor]] = None, + training=False, + ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]: + """ + Args: + hidden_states (`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)` + attention_mask (`tf.Tensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + encoder_hidden_states (`tf.Tensor`): + cross attention input to the layer of shape `(seq_len, batch, embed_dim)` + encoder_attention_mask (`tf.Tensor`): encoder attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size + `(decoder_attention_heads,)` + cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module. + `(decoder_attention_heads,)` + past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states + """ + residual = hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + # Self Attention + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + # add present self-attn cache to positions 1,2 of present_key_value tuple + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + past_key_value=self_attn_past_key_value, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + training=training, + ) + hidden_states = self.dropout(hidden_states, training=training) + hidden_states = residual + hidden_states + + # Cross-Attention Block + cross_attn_present_key_value = None + cross_attn_weights = None + if encoder_hidden_states is not None: + residual = hidden_states + hidden_states = self.encoder_attn_layer_norm(hidden_states) + + # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( + hidden_states=hidden_states, + key_value_states=encoder_hidden_states, + attention_mask=encoder_attention_mask, + layer_head_mask=cross_attn_layer_head_mask, + past_key_value=cross_attn_past_key_value, + training=training, + ) + hidden_states = self.dropout(hidden_states, training=training) + hidden_states = residual + hidden_states + + # add cross-attn to positions 3,4 of present_key_value tuple + present_key_value = present_key_value + cross_attn_present_key_value + + # Fully Connected + residual = hidden_states + hidden_states = self.final_layer_norm(hidden_states) + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = self.activation_dropout(hidden_states, training=training) + hidden_states = self.fc2(hidden_states) + hidden_states = self.dropout(hidden_states, training=training) + hidden_states = residual + hidden_states + + return ( + hidden_states, + self_attn_weights, + cross_attn_weights, + present_key_value, + ) + + +class TFWhisperPreTrainedModel(TFPreTrainedModel): + config_class = WhisperConfig + base_model_prefix = "model" + main_input_name = "input_features" + + def _get_feat_extract_output_lengths(self, input_lengths: tf.Tensor) -> int: + """ + Computes the output length of the convolutional layers + """ + input_lengths = (input_lengths - 1) // 2 + 1 + + return input_lengths + + @property + def dummy_inputs(self) -> Dict[str, tf.Tensor]: + """ + Dummy inputs to build the network. + + Returns: + `Dict[str, tf.Tensor]`: The dummy inputs. + """ + return { + self.main_input_name: tf.random.uniform( + [2, self.config.num_mel_bins, self.config.max_source_positions * 2 - 1], dtype=tf.float32 + ), + "decoder_input_ids": tf.constant([[2, 3]], dtype=tf.int64), + } + + @tf.function( + input_signature=[ + { + "input_features": tf.TensorSpec((None, None, None), tf.float32, name="input_features"), + "decoder_input_ids": tf.TensorSpec((None, None), tf.int64, name="decoder_input_ids"), + "decoder_attention_mask": tf.TensorSpec((None, None), tf.int64, name="decoder_attention_mask"), + } + ] + ) + def serving(self, inputs): + output = self.call(inputs) + return self.serving_output(output) + + +WHISPER_START_DOCSTRING = r""" + This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it + as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`WhisperConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. +""" + +WHISPER_INPUTS_DOCSTRING = r""" + Args: + input_features (`tf.Tensor` of shape `(batch_size, feature_size, sequence_length)`): + Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained + by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* + via the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the + [`WhisperFeatureExtractor`] should be used for extracting the fbank features, padding and conversion into a + tensor of type `tf.Tensor`. See [`~WhisperFeatureExtractor.__call__`] + decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Indices of decoder input sequence tokens in the vocabulary. + + Indices can be obtained using [`SpeechToTextTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are decoder input IDs?](../glossary#decoder-input-ids) + + SpeechToText uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If + `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also + be used by default. + + If you want to change padding behavior, you should read + [`modeling_whisper._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the + paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. + head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + encoder_outputs (`tuple(tuple(tf.Tensor)`, *optional*): + Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) + `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of + hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. + past_key_values (`tuple(tuple(tf.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(tf.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. decoder_inputs_embeds (`tf.Tensor` of shape + `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing + `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is + used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is + useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + + If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value + of `inputs_embeds`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@keras_serializable +class TFWhisperEncoder(tf.keras.layers.Layer): + config_class = WhisperConfig + """ + Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a + [`TFWhisperEncoderLayer`]. + + Args: + config: WhisperConfig + embed_tokens (TFWhisperEmbedding): output embedding + """ + + def __init__(self, config: WhisperConfig, **kwargs): + super().__init__(**kwargs) + self.config = config + self.layerdrop = config.encoder_layerdrop + + self.embed_dim = config.d_model + self.num_mel_bins = config.num_mel_bins + self.padding_idx = config.pad_token_id + self.max_source_positions = config.max_source_positions + self.embed_scale = math.sqrt(self.embed_dim) if config.scale_embedding else 1.0 + + # Padding is added in call() to match the PyTorch implementation + self.conv1 = tf.keras.layers.Conv1D(self.embed_dim, kernel_size=3, strides=1, padding="valid", name="conv1") + self.conv2 = tf.keras.layers.Conv1D(self.embed_dim, kernel_size=3, strides=2, padding="valid", name="conv2") + + self.embed_positions = TFWhisperPositionalEmbedding( + self.max_source_positions, self.embed_dim, name="embed_positions" + ) + + self.encoder_layers = [TFWhisperEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)] + self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm") + + self.dropout = tf.keras.layers.Dropout(config.dropout) + + @unpack_inputs + def call( + self, + input_features=None, + head_mask=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + training=False, + ): + r""" + Args: + input_features (`tf.Tensor` of shape `(batch_size, feature_size, sequence_length)`): + Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be + obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a + `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into + `input_features`, the [`WhisperFeatureExtractor`] should be used for extracting the fbank features, + padding and conversion into a tensor of type `tf.Tensor`. See [`~WhisperFeatureExtractor.__call__`] + head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + """ + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # TF 2.0 layers can't use channels first format when running on CPU. + input_features = tf.transpose(input_features, perm=(0, 2, 1)) + input_features = tf.pad(input_features, [[0, 0], [1, 1], [0, 0]]) + inputs_embeds = tf.keras.activations.gelu(self.conv1(input_features)) + inputs_embeds = tf.pad(inputs_embeds, [[0, 0], [1, 1], [0, 0]]) + inputs_embeds = tf.keras.activations.gelu(self.conv2(inputs_embeds)) + inputs_embeds = tf.transpose(inputs_embeds, perm=(0, 1, 2)) + + embed_pos = self.embed_positions(input_ids=tf.zeros((1, self.max_source_positions), dtype=tf.int32)) + + hidden_states = inputs_embeds + embed_pos + hidden_states = self.dropout(hidden_states, training=training) + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + # check if head_mask has a correct number of layers specified if desired + if head_mask is not None: + tf.debugging.assert_equal( + shape_list(head_mask)[0], + len(self.encoder_layers), + message=( + f"The head_mask should be specified for {len(self.encoder_layers)} layers, but it is for" + f" {shape_list(head_mask)[0]}." + ), + ) + + for idx, encoder_layer in enumerate(self.encoder_layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + dropout_probability = random.uniform(0, 1) + if training and (dropout_probability < self.layerdrop): # skip the layer + continue + + hidden_states, attn = encoder_layer( + hidden_states, + None, + layer_head_mask=(head_mask[idx] if head_mask is not None else None), + training=training, + ) + + if output_attentions: + all_attentions += (attn,) + + hidden_states = self.layer_norm(hidden_states) + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) + return TFBaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions + ) + + +@keras_serializable +class TFWhisperDecoder(tf.keras.layers.Layer): + config_class = WhisperConfig + """ + Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TFWhisperDecoderLayer`] + + Args: + config: WhisperConfig + """ + + def __init__(self, config: WhisperConfig, **kwargs): + super().__init__(**kwargs) + self.config = config + self.dropout = tf.keras.layers.Dropout(config.dropout) + self.layerdrop = config.decoder_layerdrop + self.padding_idx = config.pad_token_id + self.max_target_positions = config.max_target_positions + self.max_source_positions = config.max_source_positions + self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 + + self.embed_tokens = tf.keras.layers.Embedding( + input_dim=config.vocab_size, + output_dim=config.d_model, + embeddings_initializer=tf.keras.initializers.TruncatedNormal(stddev=self.config.init_std), + name="embed_tokens", + ) + self.embed_positions = TFWhisperPositionalEmbedding( + self.max_target_positions, config.d_model, name="embed_positions" + ) + + self.decoder_layers = [TFWhisperDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)] + + self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm") + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + def _prepare_decoder_attention_mask(self, attention_mask, input_shape, past_key_values_length): + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + batch_size, seq_len = input_shape[0], input_shape[1] + + combined_attention_mask = tf.cond( + tf.math.greater(seq_len, 1), + lambda: _make_causal_mask(input_shape, past_key_values_length=past_key_values_length), + lambda: _expand_mask(tf.ones((batch_size, seq_len + past_key_values_length)), tgt_len=seq_len), + ) + + if attention_mask is not None: + attention_mask = tf.cond( + tf.greater(tf.shape(attention_mask)[-1], seq_len) & tf.greater(seq_len, 0), + lambda: attention_mask[:, : seq_len + past_key_values_length], + lambda: attention_mask, + ) + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + expanded_attn_mask = _expand_mask(attention_mask, tgt_len=input_shape[-1]) + combined_attention_mask = ( + expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask + ) + return combined_attention_mask + + @unpack_inputs + def call( + self, + input_ids=None, + attention_mask=None, + position_ids=None, + encoder_hidden_states=None, + head_mask=None, + cross_attn_head_mask=None, + past_key_values=None, + inputs_embeds=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + training=False, + ): + r""" + Args: + input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`WhisperTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the + range `[0, config.max_position_embeddings - 1]`. + encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention + of the decoder. + head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention + on hidden heads. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + past_key_values (`tuple(tuple(tf.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(tf.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the + cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those + that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of + all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`tf.Tensor` of shape + `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` + you can choose to directly pass an embedded representation. This is useful if you want more control + over how to convert `input_ids` indices into associated vectors than the model's internal embedding + lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") + elif input_ids is not None: + input_shape = tf.shape(input_ids) + input_ids = tf.reshape(input_ids, (-1, input_shape[-1])) + elif inputs_embeds is not None: + input_shape = tf.shape(inputs_embeds)[:-1] + else: + raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if inputs_embeds is None: + # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound + # indices on GPU, returning zeros instead. This is a dangerous silent behavior. + tf.debugging.assert_less( + input_ids, + tf.cast(self.embed_tokens.input_dim, dtype=input_ids.dtype), + message=( + "input_ids must be smaller than the embedding layer's input dimension (got" + f" {tf.math.reduce_max(input_ids)} >= {self.embed_tokens.input_dim})" + ), + ) + inputs_embeds = self.embed_tokens(input_ids) + + attention_mask = self._prepare_decoder_attention_mask(attention_mask, input_shape, past_key_values_length) + + # embed positions + filled_past_positions = past_key_values_length if position_ids is None else position_ids[0, -1] + positions = self.embed_positions(input_ids, past_key_values_length=filled_past_positions) + + hidden_states = inputs_embeds + positions + hidden_states = self.dropout(hidden_states, training=training) + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None + next_decoder_cache = () if use_cache else None + + # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired + for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]: + if attn_mask is not None: + tf.debugging.assert_equal( + shape_list(attn_mask)[0], + len(self.decoder_layers), + message=( + f"The {attn_mask_name} should be specified for {len(self.decoder_layers)} layers, but it is" + f" for {shape_list(attn_mask)[0]}." + ), + ) + + for idx, decoder_layer in enumerate(self.decoder_layers): + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + if output_hidden_states: + all_hidden_states += (hidden_states,) + dropout_probability = random.uniform(0, 1) + if training and (dropout_probability < self.layerdrop): + continue + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + layer_head_mask=(head_mask[idx] if head_mask is not None else None), + cross_attn_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None), + past_key_value=past_key_value, + training=training, + ) + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache += (layer_outputs[3],) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + if encoder_hidden_states is not None: + all_cross_attentions += (layer_outputs[2],) + + hidden_states = self.layer_norm(hidden_states) + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = next_decoder_cache if use_cache else None + if not return_dict: + return tuple( + v + for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] + if v is not None + ) + return TFBaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + cross_attentions=all_cross_attentions, + ) + + +@add_start_docstrings( + "The bare Whisper Model outputting raw hidden-states without any specific head on top.", + WHISPER_START_DOCSTRING, +) +@keras_serializable +class TFWhisperMainLayer(tf.keras.layers.Layer): + config_class = WhisperConfig + + def __init__(self, config: WhisperConfig, **kwargs): + super().__init__(**kwargs) + self.config = config + self.encoder = TFWhisperEncoder(config, name="encoder") + self.decoder = TFWhisperDecoder(config, name="decoder") + + def get_input_embeddings(self): + return self.decoder.embed_tokens + + def set_input_embeddings(self, value): + self.decoder.embed_tokens = value + + def get_encoder(self): + return self.encoder + + def get_decoder(self): + return self.decoder + + @add_start_docstrings_to_model_forward(WHISPER_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) + @unpack_inputs + def call( + self, + input_features=None, + decoder_input_ids=None, + decoder_attention_mask=None, + decoder_position_ids=None, + head_mask=None, + decoder_head_mask=None, + cross_attn_head_mask=None, + encoder_outputs=None, + past_key_values=None, + decoder_inputs_embeds=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + training=False, + ): + r""" + Returns: + + Example: + + ```python + >>> import tensorflow as tf + >>> from transformers import TFWhisperModel, WhisperFeatureExtractor + >>> from datasets import load_dataset + + >>> model = TFWhisperModel.from_pretrained("openai/whisper-base") + >>> feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-base") + >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + >>> inputs = feature_extractor( + ... ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="tf" + ... ) + >>> input_features = inputs.input_features + >>> decoder_input_ids = tf.convert_to_tensor([[1, 1]]) * model.config.decoder_start_token_id + >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state + >>> list(last_hidden_state.shape) + [1, 2, 512] + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if encoder_outputs is None: + encoder_outputs = self.encoder( + input_features, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True + elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput): + encoder_outputs = TFBaseModelOutput( + last_hidden_state=encoder_outputs[0], + hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, + attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, + ) + + # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) + decoder_outputs = self.decoder( + input_ids=decoder_input_ids, + attention_mask=decoder_attention_mask, + position_ids=decoder_position_ids, + encoder_hidden_states=encoder_outputs[0], + head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + if not return_dict: + return decoder_outputs + encoder_outputs + + return TFSeq2SeqModelOutput( + last_hidden_state=decoder_outputs.last_hidden_state, + past_key_values=decoder_outputs.past_key_values, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + ) + + +@add_start_docstrings( + "The bare Whisper Model outputting raw hidden-states without any specific head on top.", + WHISPER_START_DOCSTRING, +) +class TFWhisperModel(TFWhisperPreTrainedModel): + def __init__(self, config: WhisperConfig, **kwargs): + super().__init__(config, **kwargs) + + self.model = TFWhisperMainLayer(config, name="model") + + def get_input_embeddings(self): + return self.model.decoder.embed_tokens + + def set_input_embeddings(self, value): + self.model.decoder.embed_tokens = value + + def get_encoder(self): + return self.model.encoder + + def get_decoder(self): + return self.model.decoder + + def decoder(self): + return self.model.decoder + + def encoder(self): + return self.model.encoder + + @add_start_docstrings_to_model_forward(WHISPER_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) + @unpack_inputs + def call( + self, + input_features=None, + decoder_input_ids=None, + decoder_attention_mask=None, + decoder_position_ids=None, + head_mask=None, + decoder_head_mask=None, + cross_attn_head_mask=None, + encoder_outputs=None, + past_key_values=None, + decoder_inputs_embeds=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + training=False, + ): + r""" + Returns: + + Example: + + ```python + >>> import tensorflow as tf + >>> from transformers import TFWhisperModel, WhisperFeatureExtractor + >>> from datasets import load_dataset + + >>> model = TFWhisperModel.from_pretrained("openai/whisper-base") + >>> feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-base") + >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + >>> inputs = feature_extractor( + ... ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="tf" + ... ) + >>> input_features = inputs.input_features + >>> decoder_input_ids = tf.convert_to_tensor([[1, 1]]) * model.config.decoder_start_token_id + >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state + >>> list(last_hidden_state.shape) + [1, 2, 512] + ```""" + outputs = self.model( + input_features=input_features, + decoder_input_ids=decoder_input_ids, + decoder_attention_mask=decoder_attention_mask, + decoder_position_ids=decoder_position_ids, + head_mask=head_mask, + decoder_head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + encoder_outputs=encoder_outputs, + past_key_values=past_key_values, + decoder_inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + return outputs + + def serving_output(self, output): + pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None + dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None + dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None + cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None + enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None + enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None + + return TFSeq2SeqModelOutput( + last_hidden_state=output.last_hidden_state, + past_key_values=pkv, + decoder_hidden_states=dec_hs, + decoder_attentions=dec_attns, + cross_attentions=cross_attns, + encoder_last_hidden_state=output.encoder_last_hidden_state, + encoder_hidden_states=enc_hs, + encoder_attentions=enc_attns, + ) + + +@add_start_docstrings( + "The Whisper Model with a language modeling head. Can be used for automatic speech recognition.", + WHISPER_START_DOCSTRING, +) +class TFWhisperForConditionalGeneration(TFWhisperPreTrainedModel, TFCausalLanguageModelingLoss): + base_model_prefix = "model" + _keys_to_ignore_on_load_missing = [ + r"encoder.version", + r"decoder.version", + r"proj_out.weight", + ] + _keys_to_ignore_on_save = [ + r"proj_out.weight", + ] + + def __init__(self, config: WhisperConfig, **kwargs): + super().__init__(config, **kwargs) + self.model = TFWhisperMainLayer(config, name="model") + + def get_encoder(self): + return self.model.get_encoder() + + def get_decoder(self): + return self.model.get_decoder() + + def get_output_embeddings(self): + return self.get_input_embeddings() + + def set_output_embeddings(self, value): + self.set_input_embeddings(value) + + def resize_token_embeddings(self, new_num_tokens: int) -> tf.keras.layers.Embedding: + new_embeddings = super().resize_token_embeddings(new_num_tokens) + return new_embeddings + + @add_start_docstrings_to_model_forward(WHISPER_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) + @unpack_inputs + def call( + self, + input_features=None, + decoder_input_ids=None, + decoder_attention_mask=None, + decoder_position_ids=None, + head_mask=None, + decoder_head_mask=None, + cross_attn_head_mask=None, + encoder_outputs=None, + past_key_values=None, + decoder_inputs_embeds=None, + labels=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + training=False, + ): + r""" + labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` + or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is + only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + Example: + + ```python + >>> import tensorflow as tf + >>> from transformers import WhisperProcessor, TFWhisperForConditionalGeneration + >>> from datasets import load_dataset + + >>> processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") + >>> model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") + + >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + + >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="tf") + >>> input_features = inputs.input_features + + >>> generated_ids = model.generate(inputs=input_features) + + >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] + >>> transcription + ' Mr. Quilter is the apostle of the middle classes, and we are glad to' + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if labels is not None: + if decoder_input_ids is None: + decoder_input_ids = shift_tokens_right( + labels, self.config.pad_token_id, self.config.decoder_start_token_id + ) + + outputs = self.model( + input_features, + decoder_input_ids=decoder_input_ids, + encoder_outputs=encoder_outputs, + decoder_attention_mask=decoder_attention_mask, + decoder_position_ids=decoder_position_ids, + head_mask=head_mask, + decoder_head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + decoder_inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + decoder_last_hidden_state = outputs[0] + # Decoder and encoder embeddings are tied + lm_logits = tf.matmul(decoder_last_hidden_state, self.get_output_embeddings().weights, transpose_b=True) + + loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) + + if not return_dict: + output = (lm_logits,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return TFSeq2SeqLMOutput( + loss=loss, + logits=lm_logits, + past_key_values=outputs.past_key_values, + decoder_hidden_states=outputs.decoder_hidden_states, + decoder_attentions=outputs.decoder_attentions, + cross_attentions=outputs.cross_attentions, + encoder_last_hidden_state=outputs.encoder_last_hidden_state, + encoder_hidden_states=outputs.encoder_hidden_states, + encoder_attentions=outputs.encoder_attentions, + ) + + def serving_output(self, output): + pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None + dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None + dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None + cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None + enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None + enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None + + return TFSeq2SeqLMOutput( + logits=output.logits, + past_key_values=pkv, + decoder_hidden_states=dec_hs, + decoder_attentions=dec_attns, + cross_attentions=cross_attns, + encoder_last_hidden_state=output.encoder_last_hidden_state, + encoder_hidden_states=enc_hs, + encoder_attentions=enc_attns, + ) + + def prepare_inputs_for_generation( + self, + decoder_input_ids, + past=None, + use_cache=None, + encoder_outputs=None, + attention_mask=None, + decoder_attention_mask=None, + **kwargs + ): + # cut decoder_input_ids if past is used + if past is not None: + decoder_input_ids = decoder_input_ids[:, -1:] + + if decoder_attention_mask is not None: # xla + decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:] + elif past is not None: # no xla + past + decoder_position_ids = past[0][0].shape[2] + else: # no xla + no past + decoder_position_ids = tf.range(decoder_input_ids.shape[1]) + decoder_position_ids = tf.broadcast_to(decoder_position_ids, decoder_input_ids.shape) + + return { + "input_features": None, # Needs to be passed to make Keras.layer.__call__ happy + "encoder_outputs": encoder_outputs, + "past_key_values": past, + "decoder_input_ids": decoder_input_ids, + "use_cache": use_cache, + "decoder_attention_mask": decoder_attention_mask, + "decoder_position_ids": decoder_position_ids, + } + + # + @staticmethod + def _reorder_cache(past, beam_idx): + reordered_past = () + for layer_past in past: + reordered_past += (tuple(tf.gather(past_state, beam_idx) for past_state in layer_past),) + return reordered_past diff --git a/src/transformers/utils/dummy_tf_objects.py b/src/transformers/utils/dummy_tf_objects.py index 37075076f9c802..7cec699498c9ab 100644 --- a/src/transformers/utils/dummy_tf_objects.py +++ b/src/transformers/utils/dummy_tf_objects.py @@ -2394,6 +2394,30 @@ def __init__(self, *args, **kwargs): requires_backends(self, ["tf"]) +TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST = None + + +class TFWhisperForConditionalGeneration(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFWhisperModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + +class TFWhisperPreTrainedModel(metaclass=DummyObject): + _backends = ["tf"] + + def __init__(self, *args, **kwargs): + requires_backends(self, ["tf"]) + + TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST = None diff --git a/tests/generation/test_generation_tf_logits_process.py b/tests/generation/test_generation_tf_logits_process.py index 676a392204d0cc..f87d488dbd3a98 100644 --- a/tests/generation/test_generation_tf_logits_process.py +++ b/tests/generation/test_generation_tf_logits_process.py @@ -29,11 +29,14 @@ from transformers.generation_tf_logits_process import ( TFForcedBOSTokenLogitsProcessor, TFForcedEOSTokenLogitsProcessor, + TFForceTokensLogitsProcessor, TFLogitsProcessorList, TFMinLengthLogitsProcessor, TFNoBadWordsLogitsProcessor, TFNoRepeatNGramLogitsProcessor, TFRepetitionPenaltyLogitsProcessor, + TFSuppressTokensAtBeginLogitsProcessor, + TFSuppressTokensLogitsProcessor, TFTemperatureLogitsWarper, TFTopKLogitsWarper, TFTopPLogitsWarper, @@ -331,6 +334,86 @@ def test_forced_eos_token_logits_processor(self, use_xla): scores = logits_processor(input_ids, scores, cur_len) self.assertFalse(tf.math.reduce_any(tf.math.is_inf((scores)))) + @parameterized.expand([(False,), (True,)]) + def test_suppress_tokens_at_begin_logits_processor(self, use_xla): + vocab_size = 20 + batch_size = 4 + + begin_suppress_tokens = [1, 2, 3] + begin_index = 5 + + logits_processor = TFSuppressTokensAtBeginLogitsProcessor( + begin_suppress_tokens=begin_suppress_tokens, begin_index=begin_index + ) + if use_xla: + logits_processor = tf.function(logits_processor, jit_compile=True) + + # Check that no scores are suppressed if begin_index is not reached + cur_len = 4 + input_ids = tf.convert_to_tensor([[11, 17, 15, 8], [14, 0, 19, 5], [13, 11, 18, 19], [11, 12, 16, 15]]) + scores = self._get_uniform_logits(batch_size, vocab_size) + scores = logits_processor(input_ids, scores, cur_len) + self.assertFalse(tf.math.reduce_any(tf.math.is_inf((scores)))) + + # Check that scores are suppressed if begin_index is reached + cur_len = 5 + input_ids = tf.convert_to_tensor([[5, 5, 5, 0, 17], [18, 1, 9, 14, 17], [18, 6, 8, 15, 19], [8, 12, 17, 1, 2]]) + scores = self._get_uniform_logits(batch_size, vocab_size) + scores = logits_processor(input_ids, scores, cur_len) + self.assertTrue(tf.math.reduce_all(tf.math.is_inf(tf.gather(scores, begin_suppress_tokens, axis=1)))) + + @parameterized.expand([(False,), (True,)]) + def test_suppress_tokens_logits_processor(self, use_xla): + vocab_size = 20 + batch_size = 4 + + suppress_tokens = [1, 3, 5] + keep_tokens = [i for i in range(vocab_size) if i not in suppress_tokens] + + logits_processor = TFSuppressTokensLogitsProcessor(suppress_tokens=suppress_tokens) + if use_xla: + logits_processor = tf.function(logits_processor, jit_compile=True) + + # Check that suppress_tokens are suppressed and others are not + cur_len = 5 + input_ids = tf.convert_to_tensor([[0, 10, 19, 6, 3], [17, 4, 8, 17, 2], [7, 1, 11, 6, 15], [5, 8, 13, 16, 0]]) + scores = self._get_uniform_logits(batch_size, vocab_size) + scores = logits_processor(input_ids, scores, cur_len) + self.assertTrue(tf.math.reduce_all(tf.math.is_inf(tf.gather(scores, suppress_tokens, axis=1)))) + self.assertFalse(tf.math.reduce_any(tf.math.is_inf(tf.gather(scores, keep_tokens, axis=1)))) + + @parameterized.expand([(False,), (True,)]) + def test_force_tokens_logits_processor(self, use_xla): + vocab_size = 20 + batch_size = 4 + + force_token_map = {1: 2, 3: 2} + + logits_processor = TFForceTokensLogitsProcessor(force_token_map=force_token_map) + if use_xla: + logits_processor = tf.function(logits_processor, jit_compile=True) + + # check that if the cur_len is contained in the force_token_map, the logits are the same + # for all tokens except the one the force_token_map points to + cur_len = 1 + input_ids = tf.convert_to_tensor([[11], [7], [5], [15]]) + ids_tensor((batch_size, cur_len), vocab_size=20) + scores = self._get_uniform_logits(batch_size, vocab_size) + scores = logits_processor(input_ids, scores, cur_len) + tf.debugging.assert_near(tf.gather(scores, [force_token_map[cur_len]], axis=1), 0.0) + + non_forced_inds = [i for i in range(vocab_size) if i != force_token_map[cur_len]] + self.assertTrue( + tf.math.reduce_all(tf.math.is_inf(tf.gather(scores, [non_forced_inds], axis=1))), + ) + + # check that if the cur_len is not contained in the force_token_map, the logits are not modified + cur_len = 2 + input_ids = tf.convert_to_tensor([[2, 19], [19, 15], [4, 9], [7, 6]]) + scores = self._get_uniform_logits(batch_size, vocab_size) + scores = logits_processor(input_ids, scores, cur_len) + self.assertFalse(tf.math.reduce_any(tf.math.is_inf((scores)))) + @parameterized.expand([(False,), (True,)]) def test_processor_list(self, use_xla): # TODO (Joao): reintroduce TFNoRepeatNGramLogitsProcessor when it gets compatible with XLA diff --git a/tests/models/whisper/test_modeling_tf_whisper.py b/tests/models/whisper/test_modeling_tf_whisper.py new file mode 100644 index 00000000000000..62aeeb1367f1cc --- /dev/null +++ b/tests/models/whisper/test_modeling_tf_whisper.py @@ -0,0 +1,983 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Testing suite for the TensorFlow Whisper model. """ + +import inspect +import tempfile +import unittest + +import numpy as np + +from transformers import WhisperConfig, WhisperFeatureExtractor, WhisperProcessor +from transformers.testing_utils import is_tf_available, require_tf, require_tokenizers, slow +from transformers.utils import cached_property +from transformers.utils.import_utils import is_datasets_available + +from ...test_configuration_common import ConfigTester +from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor + + +if is_datasets_available(): + import datasets + from datasets import load_dataset + + +if is_tf_available(): + import tensorflow as tf + + from transformers import TFWhisperForConditionalGeneration, TFWhisperModel, set_seed + from transformers.models.whisper.modeling_tf_whisper import TFWhisperDecoder, TFWhisperEncoder + + +def prepare_whisper_inputs_dict( + config, + input_features, + decoder_input_ids, + attention_mask=None, + decoder_attention_mask=None, + head_mask=None, + decoder_head_mask=None, + cross_attn_head_mask=None, +): + if decoder_attention_mask is None: + decoder_attention_mask = tf.where(decoder_input_ids != config.pad_token_id, 1, 0) + if head_mask is None: + head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads)) + if decoder_head_mask is None: + decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) + if cross_attn_head_mask is None: + cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) + return { + "input_features": input_features, + "decoder_input_ids": decoder_input_ids, + "decoder_attention_mask": decoder_attention_mask, + "head_mask": head_mask, + "decoder_head_mask": decoder_head_mask, + "cross_attn_head_mask": cross_attn_head_mask, + } + + +@require_tf +class TFWhisperModelTester: + def __init__( + self, + parent, + batch_size=13, + seq_length=60, + is_training=True, + use_labels=False, + vocab_size=99, + hidden_size=16, + num_hidden_layers=2, + num_attention_heads=4, + input_channels=1, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=20, + max_source_positions=30, + max_target_positions=60, + bos_token_id=98, + eos_token_id=98, + pad_token_id=0, + num_mel_bins=80, + decoder_start_token_id=85, + num_conv_layers=1, + suppress_tokens=None, + begin_suppress_tokens=None, + ): + self.parent = parent + self.batch_size = batch_size + self.seq_length = seq_length + self.is_training = is_training + self.use_labels = use_labels + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.input_channels = input_channels + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.num_mel_bins = num_mel_bins + self.max_position_embeddings = max_position_embeddings + self.max_source_positions = max_source_positions + self.max_target_positions = max_target_positions + self.eos_token_id = eos_token_id + self.pad_token_id = pad_token_id + self.bos_token_id = bos_token_id + self.decoder_start_token_id = decoder_start_token_id + self.num_conv_layers = num_conv_layers + self.suppress_tokens = suppress_tokens + self.begin_suppress_tokens = begin_suppress_tokens + + def prepare_config_and_inputs(self): + input_features = floats_tensor([self.batch_size, self.num_mel_bins, self.seq_length], self.vocab_size) + + decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) + + config = self.get_config() + inputs_dict = prepare_whisper_inputs_dict( + config, + attention_mask=None, + input_features=input_features, + decoder_input_ids=decoder_input_ids, + ) + return config, inputs_dict + + def get_config(self): + return WhisperConfig( + vocab_size=self.vocab_size, + d_model=self.hidden_size, + encoder_layers=self.num_hidden_layers, + decoder_layers=self.num_hidden_layers, + encoder_attention_heads=self.num_attention_heads, + decoder_attention_heads=self.num_attention_heads, + input_channels=self.input_channels, + dropout=self.hidden_dropout_prob, + attention_dropout=self.attention_probs_dropout_prob, + max_position_embeddings=self.max_position_embeddings, + max_source_positions=self.max_source_positions, + max_target_positions=self.max_target_positions, + eos_token_id=self.eos_token_id, + bos_token_id=self.bos_token_id, + pad_token_id=self.pad_token_id, + decoder_ffn_dim=self.hidden_size, + encoder_ffn_dim=self.hidden_size, + decoder_start_token_id=self.decoder_start_token_id, + suppress_tokens=self.suppress_tokens, + begin_suppress_tokens=self.begin_suppress_tokens, + ) + + def prepare_config_and_inputs_for_common(self): + config, inputs_dict = self.prepare_config_and_inputs() + return config, inputs_dict + + def get_subsampled_output_lengths(self, input_lengths): + """ + Computes the output length of the convolutional layers + """ + + for i in range(self.num_conv_layers): + input_lengths = (input_lengths - 1) // 2 + 1 + + return input_lengths + + def create_and_check_model_forward(self, config, inputs_dict): + model = TFWhisperModel(config=config) + + input_features = inputs_dict["input_features"] + decoder_input_ids = inputs_dict["decoder_input_ids"] + + # first forward pass + last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state + + self.parent.assertTrue(last_hidden_state.shape, (13, 7, 16)) + + def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): + model = TFWhisperModel(config=config).get_decoder() + # take a slice so we're shorter than the seqeuence length and can append later + input_ids = inputs_dict["decoder_input_ids"][:, :-10] + attention_mask = inputs_dict["decoder_attention_mask"][:, :-10] + + # first forward pass + outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) + + output, past_key_values = outputs.to_tuple() + + # create hypothetical multiple next token and extent to next_input_ids + next_token = ids_tensor((self.batch_size, 3), config.vocab_size) + next_tokens = tf.where(next_token <= 2, 2, next_token) + next_attn_mask = ids_tensor((self.batch_size, 3), 2) + + # append to next input_ids and + next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) + next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1) + + output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] + output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ + "last_hidden_state" + ] + + # select random slice + random_slice_idx = np.random.randint(0, output_from_past.shape[-1]) + output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] + output_from_past_slice = output_from_past[:, :, random_slice_idx] + + self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) + + # test that outputs are equal for slice + self.parent.assertTrue(np.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2)) + + def check_encoder_decoder_model_standalone(self, config, inputs_dict): + model = TFWhisperModel(config=config) + outputs = model(**inputs_dict) + + encoder_last_hidden_state = outputs.encoder_last_hidden_state + last_hidden_state = outputs.last_hidden_state + + with tempfile.TemporaryDirectory() as tmpdirname: + encoder = model.get_encoder() + encoder.save_pretrained(tmpdirname) + encoder = TFWhisperEncoder.from_pretrained(tmpdirname) + + encoder_last_hidden_state_2 = encoder(inputs_dict["input_features"])[0] + + self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max() < 1e-3) + + with tempfile.TemporaryDirectory() as tmpdirname: + decoder = model.get_decoder() + decoder.save_pretrained(tmpdirname) + decoder = TFWhisperDecoder.from_pretrained(tmpdirname) + + last_hidden_state_2 = decoder( + input_ids=inputs_dict["decoder_input_ids"], + attention_mask=inputs_dict["decoder_attention_mask"], + encoder_hidden_states=encoder_last_hidden_state, + )[0] + + self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max() < 1e-3) + + +@require_tf +class TFWhisperModelTest(TFModelTesterMixin, unittest.TestCase): + all_model_classes = (TFWhisperModel, TFWhisperForConditionalGeneration) if is_tf_available() else () + all_generative_model_classes = (TFWhisperForConditionalGeneration,) if is_tf_available() else () + is_encoder_decoder = True + fx_compatible = False + test_pruning = False + test_missing_keys = False + test_onnx = False + + input_name = "input_features" + + def setUp(self): + self.model_tester = TFWhisperModelTester(self) + self.config_tester = ConfigTester(self, config_class=WhisperConfig) + self.maxDiff = 3000 + + def test_config(self): + self.config_tester.run_common_tests() + + def test_save_load_strict(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs() + for model_class in self.all_model_classes: + model = model_class(config) + + model(model.dummy_inputs) + + with tempfile.TemporaryDirectory() as tmpdirname: + model.save_pretrained(tmpdirname, saved_model=False) + model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) + self.assertEqual(info["missing_keys"], []) + + def test_model_forward(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_model_forward(*config_and_inputs) + + def test_decoder_model_past_with_large_inputs(self): + config_and_inputs = self.model_tester.prepare_config_and_inputs() + self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) + + def _get_input_ids_and_config(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + input_ids = inputs_dict[self.input_name] + + # cut to half length & take max batch_size 3 + max_batch_size = 3 + input_ids = input_ids[:max_batch_size, :, :] + + # generate max 3 tokens + max_length = input_ids.shape[-1] + 3 + if config.eos_token_id is not None and config.pad_token_id is None: + # hack to allow generate for models such as GPT2 as is done in `generate()` + config.pad_token_id = config.eos_token_id + + return config, input_ids, None, max_length + + # not implemented currently + def test_inputs_embeds(self): + pass + + @unittest.skip("Training is not yet supported") + def test_training(self): + pass + + def test_generate_with_head_masking(self): + pass + + @unittest.skip("fp16 is not yet supported for TF models") + def test_generate_fp16(self): + config, input_dict = self.model_tester.prepare_config_and_inputs() + config.max_target_positions = 400 + input_features = input_dict["input_features"] + model = TFWhisperForConditionalGeneration(config) + model.generate(input_features) + model.generate(input_features, num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) + + def test_forward_signature(self): + config, _ = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + model = model_class(config) + signature = inspect.signature(model.call) + # signature.parameters is an OrderedDict => so arg_names order is deterministic + arg_names = [*signature.parameters.keys()] + + expected_arg_names = [ + "input_features", + "decoder_input_ids", + "decoder_attention_mask", + ] + expected_arg_names.extend( + ["decoder_position_ids", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] + if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names + else ["encoder_outputs"] + ) + self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) + + def test_hidden_states_output(self): + def check_hidden_states_output(inputs_dict, config, model_class): + model = model_class(config) + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states + + expected_num_layers = getattr( + self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 + ) + self.assertEqual(len(hidden_states), expected_num_layers) + + if hasattr(self.model_tester, "encoder_seq_length"): + seq_length = self.model_tester.encoder_seq_length + else: + seq_length = self.model_tester.seq_length + + subsampled_seq_length = model._get_feat_extract_output_lengths(seq_length) + + self.assertListEqual( + list(hidden_states[0].shape[-2:]), + [subsampled_seq_length, self.model_tester.hidden_size], + ) + + if config.is_encoder_decoder: + hidden_states = outputs.decoder_hidden_states + + self.assertIsInstance(hidden_states, (list, tuple)) + self.assertEqual(len(hidden_states), expected_num_layers) + + decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_length) + + self.assertListEqual( + list(hidden_states[0].shape[-2:]), + [decoder_seq_length, self.model_tester.hidden_size], + ) + + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + + for model_class in self.all_model_classes: + inputs_dict["output_hidden_states"] = True + check_hidden_states_output(inputs_dict, config, model_class) + + # check that output_hidden_states also work using config + del inputs_dict["output_hidden_states"] + config.output_hidden_states = True + + check_hidden_states_output(inputs_dict, config, model_class) + + def test_attention_outputs(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + config.return_dict = True + + seq_len = getattr(self.model_tester, "seq_length", None) + decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) + encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) + encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) + decoder_key_length = getattr(self.model_tester, "decoder_key_length", encoder_key_length) + + for model_class in self.all_model_classes: + inputs_dict["output_attentions"] = True + inputs_dict["output_hidden_states"] = False + config.return_dict = True + model = model_class(config) + + subsampled_encoder_seq_length = model._get_feat_extract_output_lengths(encoder_seq_length) + subsampled_encoder_key_length = model._get_feat_extract_output_lengths(encoder_key_length) + + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions + self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) + + # check that output_attentions also work using config + del inputs_dict["output_attentions"] + config.output_attentions = True + model = model_class(config) + + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions + self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) + + self.assertListEqual( + list(attentions[0].shape[-3:]), + [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], + ) + out_len = len(outputs) + + correct_outlen = 5 + + # loss is at first position + if "labels" in inputs_dict: + correct_outlen += 1 # loss is added to beginning + if "past_key_values" in outputs: + correct_outlen += 1 # past_key_values have been returned + + self.assertEqual(out_len, correct_outlen) + + # decoder attentions + decoder_attentions = outputs.decoder_attentions + self.assertIsInstance(decoder_attentions, (list, tuple)) + self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) + self.assertListEqual( + list(decoder_attentions[0].shape[-3:]), + [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], + ) + + # cross attentions + cross_attentions = outputs.cross_attentions + self.assertIsInstance(cross_attentions, (list, tuple)) + self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) + self.assertListEqual( + list(cross_attentions[0].shape[-3:]), + [ + self.model_tester.num_attention_heads, + decoder_seq_length, + subsampled_encoder_key_length, + ], + ) + + # Check attention is always last and order is fine + inputs_dict["output_attentions"] = True + inputs_dict["output_hidden_states"] = True + model = model_class(config) + outputs = model(**self._prepare_for_class(inputs_dict, model_class)) + + added_hidden_states = 2 + self.assertEqual(out_len + added_hidden_states, len(outputs)) + + self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions + + self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) + self.assertListEqual( + list(self_attentions[0].shape[-3:]), + [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], + ) + + def test_generate_without_input_ids(self): + pass + + @staticmethod + def _get_encoder_outputs( + model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1 + ): + encoder = model.get_encoder() + encoder_outputs = encoder( + input_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave( + num_interleave, dim=0 + ) + input_ids = input_ids[:, :, 0] + input_ids = tf.zeros_like(input_ids[:, :1], dtype=tf.int64) + tf.convert_to_tensor( + [model._get_decoder_start_token_id()] + ) + attention_mask = None + return encoder_outputs, input_ids, attention_mask + + def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1): + batch_size, mel, seq_length = input_ids.shape + subsampled_seq_length = self.model_tester.get_subsampled_output_lengths(seq_length) + num_sequences_in_output = batch_size * num_return_sequences + gen_len = ( + output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length + ) + + # scores + self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config) + + # Attentions + # encoder + self._check_encoder_attention_for_generate( + output.encoder_attentions, batch_size, config, subsampled_seq_length + ) + # decoder + self._check_attentions_for_generate( + num_sequences_in_output, + output.decoder_attentions, + min_length=1, + max_length=output.sequences.shape[-1], + config=config, + use_cache=use_cache, + ) + + # Hidden States + # encoder + self._check_encoder_hidden_states_for_generate( + output.encoder_hidden_states, batch_size, config, subsampled_seq_length + ) + + # decoder + self._check_hidden_states_for_generate( + num_sequences_in_output, + output.decoder_hidden_states, + min_length=1, + max_length=output.sequences.shape[-1], + config=config, + use_cache=use_cache, + ) + + # overwritten from parent due to the inability to work when non-text inputs are not passed AND because the input is + # `input_features` + def test_lm_head_model_random_no_beam_search_generate(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + input_features = inputs_dict.get("input_features", None) + + # iterate over all generative models + for model_class in self.all_generative_model_classes: + model = model_class(config) + + if config.bos_token_id is None: + # if bos token id is not defined model needs input_features + with self.assertRaises(AssertionError): + model.generate(do_sample=True, max_length=5) + # num_return_sequences = 1 + self._check_generated_ids(model.generate(input_features, do_sample=True)) + + with self.assertRaises(ValueError): + # generating multiple sequences when no beam search generation + # is not allowed as it would always generate the same sequences + model.generate(input_features, do_sample=False, num_return_sequences=2) + + # num_return_sequences > 1, sample + self._check_generated_ids(model.generate(input_features, do_sample=True, num_return_sequences=2)) + + # check bad words tokens language generation + # create list of 1-seq bad token and list of 2-seq of bad tokens + bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)] + output_tokens = model.generate( + input_features, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2 + ) + # only count generated tokens + generated_ids = output_tokens[:, input_features.shape[-1] :] + self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids)) + + # overwritten from parent due to the inability to work when non-text inputs are not passed AND because the input is + # `input_features` + def test_lm_head_model_random_beam_search_generate(self): + config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() + input_features = inputs_dict.get("input_features", None) + + for model_class in self.all_generative_model_classes: + model = model_class(config) + + if config.bos_token_id is None: + # if bos token id is not defined model needs input_ids, num_return_sequences = 1 + self._check_generated_ids(model.generate(input_features, do_sample=True, num_beams=2)) + + with self.assertRaises(ValueError): + # generating more sequences than having beams leads is not possible + model.generate(input_features, do_sample=False, num_return_sequences=3, num_beams=2) + + # num_return_sequences > 1, sample + self._check_generated_ids( + model.generate( + input_features, + do_sample=True, + num_beams=2, + num_return_sequences=2, + ) + ) + # num_return_sequences > 1, greedy + self._check_generated_ids( + model.generate(input_features, do_sample=False, num_beams=2, num_return_sequences=2) + ) + + # check bad words tokens language generation + # create list of 1-seq bad token and list of 2-seq of bad tokens + bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)] + output_tokens = model.generate( + input_features, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2 + ) + # only count generated tokens + generated_ids = output_tokens[:, input_features.shape[-1] :] + self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids)) + + +@require_tf +@require_tokenizers +class TFWhisperModelIntegrationTests(unittest.TestCase): + @cached_property + def default_processor(self): + return WhisperProcessor.from_pretrained("openai/whisper-base") + + def _load_datasamples(self, num_samples): + + ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + # automatic decoding with librispeech + speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] + + return [x["array"] for x in speech_samples] + + @slow + def test_tiny_logits_librispeech(self): + set_seed(0) + model = TFWhisperModel.from_pretrained("openai/whisper-tiny") + input_speech = self._load_datasamples(1) + feature_extractor = WhisperFeatureExtractor() + input_features = feature_extractor(input_speech, return_tensors="tf").input_features + + logits = model( + input_features, + decoder_input_ids=tf.convert_to_tensor([[50258, 50259, 50359]]), + output_hidden_states=False, + output_attentions=False, + return_dict=False, + use_cache=False, + ) + + # fmt: off + EXPECTED_LOGITS = tf.convert_to_tensor( + [ + 2.9892, -6.7607, 5.7348, 3.6096, 0.2152, -5.7321, 4.8855, -1.6407, + 0.2823, -1.5718, 10.4269, 3.4427, 0.0219, -8.0612, 3.4784, 8.4246, + 4.0575, -2.2864, 11.1084, 0.9963, 0.9884, -8.5154, -3.5469, -9.3713, + 0.9786, 3.5435, 7.4850, -5.2579, -1.4366, 10.4841 + ] + ) + # fmt: on + self.assertTrue(np.allclose(logits[0][0, 0, :30], EXPECTED_LOGITS, atol=1e-4)) + + # fmt: off + EXPECTED_GENERATION = tf.convert_to_tensor( + [ + -1.4651, -2.6944, 2.7821, 2.3793, 4.0738, 0.0188, -3.3203, 1.9836, + 0.0520, 0.7095, 1.1063, 0.2952, -3.6786, -0.5249, 0.3105, 4.7691, + 1.1562, 1.3046, 0.5810, -0.3624, 1.7006, 1.3424, 0.9817, 2.1958, + 1.8775, -5.7046, -0.7679, 4.0113, 2.6848, 2.8609 + ] + ) + # fmt: on + + head_logits = logits[0] @ tf.transpose(model.model.decoder.embed_tokens.weights[0]) + self.assertTrue(np.allclose(head_logits[0, 0, :30], EXPECTED_GENERATION, atol=1e-4)) + + @slow + def test_small_en_logits_librispeech(self): + set_seed(0) + model = TFWhisperModel.from_pretrained("openai/whisper-small.en") + + input_speech = self._load_datasamples(1) + + feaure_extractor = WhisperFeatureExtractor() + input_features = feaure_extractor(input_speech, return_tensors="tf").input_features + + logits = model( + input_features, + decoder_input_ids=tf.convert_to_tensor([[model.config.decoder_start_token_id]]), + output_hidden_states=False, + output_attentions=False, + use_cache=False, + ) + + logits = logits.last_hidden_state @ tf.transpose(model.model.decoder.embed_tokens.weights[0]) + + # fmt: off + EXPECTED_LOGITS = tf.convert_to_tensor( + [ + -3.6784, -7.7211, -9.5070, -11.9286, -7.6489, -9.7026, -5.6188, + -8.0104, -4.6238, -5.1833, -9.0485, -3.4079, -5.4874, -2.6935, + -6.3479, -7.3398, -6.9558, -7.6867, -7.4748, -8.3463, -9.9781, + -10.8389, -10.3105, -11.7201, -9.7261, -7.1590, -5.9272, -12.4509, + -11.1146, -8.1918 + ] + ) + # fmt: on + self.assertTrue(np.allclose(logits[0, 0, :30], EXPECTED_LOGITS, atol=1e-4)) + + @slow + def test_large_logits_librispeech(self): + set_seed(0) + + model = TFWhisperModel.from_pretrained("openai/whisper-large") + + input_speech = self._load_datasamples(1) + + processor = WhisperProcessor.from_pretrained("openai/whisper-large") + processed_inputs = processor(audio=input_speech, text="This part of the speech", return_tensors="tf") + input_features = processed_inputs.input_features + labels = processed_inputs.labels + + logits = model( + input_features, + decoder_input_ids=labels, + output_hidden_states=False, + output_attentions=False, + use_cache=False, + ) + + logits = logits.last_hidden_state @ tf.transpose(model.model.decoder.embed_tokens.weights[0]) + + # fmt: off + EXPECTED_LOGITS = tf.convert_to_tensor( + [ + 2.1382, 0.9381, 4.4671, 3.5589, 2.4022, 3.8576, -0.6521, 2.5472, + 1.8301, 1.9957, 2.3432, 1.4678, 0.5459, 2.2597, 1.5179, 2.5357, + 1.1624, 0.6194, 1.0757, 1.8259, 2.4076, 1.6601, 2.3503, 1.3376, + 1.9891, 1.8635, 3.8931, 5.3699, 4.4772, 3.9184 + ] + ) + # fmt: on + + self.assertTrue(np.allclose(logits[0, 0, :30], EXPECTED_LOGITS, atol=1e-4)) + + @slow + def test_tiny_en_generation(self): + set_seed(0) + processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") + model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") + model.config.decoder_start_token_id = 50257 + + input_speech = self._load_datasamples(1) + input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features + + generated_ids = model.generate(input_features, num_beams=5) + transcript = processor.tokenizer.batch_decode(generated_ids)[0] + + EXPECTED_TRANSCRIPT = ( + "<|startoftranscript|><|notimestamps|> Mr. Quilter is the apostle of the middle" + " classes, and we are glad to" + ) + self.assertEqual(transcript, EXPECTED_TRANSCRIPT) + + @slow + def test_tiny_generation(self): + set_seed(0) + processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") + model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") + + input_speech = self._load_datasamples(1) + input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features + + generated_ids = model.generate(input_features, num_beams=5) + transcript = processor.tokenizer.decode(generated_ids[0]) + + EXPECTED_TRANSCRIPT = ( + "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> Mr. Quilter is the apostle of the middle" + " classes and we are glad" + ) + self.assertEqual(transcript, EXPECTED_TRANSCRIPT) + + @slow + def test_tiny_xla_generation(self): + set_seed(0) + processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") + model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") + + input_speech = self._load_datasamples(1) + input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features + + xla_generate = tf.function(model.generate, jit_compile=True) + + generated_ids = model.generate(input_features, num_beams=5) + generated_ids_xla = xla_generate(input_features, num_beams=5) + + transcript = processor.tokenizer.decode(generated_ids[0]) + transcript_xla = processor.tokenizer.decode(generated_ids_xla[0]) + + EXPECTED_TRANSCRIPT = ( + "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> Mr. Quilter is the apostle of the middle" + " classes and we are glad" + ) + self.assertEqual(transcript, EXPECTED_TRANSCRIPT) + self.assertEqual(transcript_xla, EXPECTED_TRANSCRIPT) + + @slow + def test_large_generation(self): + set_seed(0) + processor = WhisperProcessor.from_pretrained("openai/whisper-large") + model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-large") + + input_speech = self._load_datasamples(1) + input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features + + model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="en", task="transcribe") + generated_ids = model.generate( + input_features, + do_sample=False, + ) + transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] + + EXPECTED_TRANSCRIPT = " Mr. Quilter is the apostle of the middle classes and we are glad" + self.assertEqual(transcript, EXPECTED_TRANSCRIPT) + + @slow + def test_large_generation_multilingual(self): + set_seed(0) + processor = WhisperProcessor.from_pretrained("openai/whisper-large") + model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-large") + + ds = load_dataset("common_voice", "ja", split="test", streaming=True) + ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000)) + input_speech = next(iter(ds))["audio"]["array"] + input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features + + model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="ja", task="transcribe") + generated_ids = model.generate(input_features, do_sample=False) + transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] + + EXPECTED_TRANSCRIPT = "木村さんに電話を貸してもらいました" + self.assertEqual(transcript, EXPECTED_TRANSCRIPT) + + model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="en", task="transcribe") + generated_ids = model.generate( + input_features, + do_sample=False, + ) + transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] + + EXPECTED_TRANSCRIPT = " Kimura san ni denwa wo kaite moraimashita" + self.assertEqual(transcript, EXPECTED_TRANSCRIPT) + + model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="ja", task="translate") + generated_ids = model.generate(input_features, do_sample=False) + transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] + + EXPECTED_TRANSCRIPT = " I borrowed a phone from Kimura san" + self.assertEqual(transcript, EXPECTED_TRANSCRIPT) + + @slow + def test_large_batched_generation(self): + set_seed(0) + processor = WhisperProcessor.from_pretrained("openai/whisper-large") + model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-large") + + input_speech = self._load_datasamples(4) + input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features + generated_ids = model.generate(input_features) + + # fmt: off + EXPECTED_LOGITS = tf.convert_to_tensor( + [ + [50258, 50358, 50363, 2221, 13, 2326, 388, 391, 307, 264, 50244, 295, 264, 2808, 5359, 293, 321, 366, 5404, 281], + [50258, 50358, 50363, 6966, 307, 2221, 13, 2326, 388, 391, 311, 9060, 1570, 1880, 813, 702, 1871, 13, 50257, 50257], + [50258, 50358, 50363, 634, 5112, 505, 300, 412, 341, 42729, 3196, 295, 264, 1064, 11, 365, 5272, 293, 12904, 9256], + [50258, 50358, 50363, 634, 575, 12525, 22618, 1968, 6144, 35617, 20084, 1756, 311, 589, 307, 534, 10281, 934, 439, 11] + ] + ) + # fmt: on + + self.assertTrue(np.allclose(generated_ids, EXPECTED_LOGITS)) + + # fmt: off + EXPECTED_TRANSCRIPT = [ + ' Mr. Quilter is the apostle of the middle classes, and we are glad to', + " Nor is Mr. Quilter's manner less interesting than his matter.", + " He tells us that at this festive season of the year, with Christmas and roast beef", + " He has grave doubts whether Sir Frederick Layton's work is really Greek after all," + ] + # fmt: on + + transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) + self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) + + @slow + def test_tiny_en_batched_generation(self): + set_seed(0) + processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") + model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") + + input_speech = self._load_datasamples(4) + input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features + generated_ids = model.generate(input_features) + + # fmt: off + EXPECTED_LOGITS = tf.convert_to_tensor( + [ + [50257, 50362, 1770, 13, 2264, 346, 353, 318, 262, 46329, 286, 262, 3504, 6097, 11, 290, 356, 389, 9675, 284], + [50257, 50362, 5414, 318, 1770, 13, 2264, 346, 353, 338, 5642, 1342, 3499, 621, 465, 2300, 13, 50256, 50256, 50256], + [50257, 50362, 679, 4952, 514, 326, 379, 428, 43856, 1622, 286, 262, 614, 11, 351, 6786, 290, 32595, 12023, 28236], + [50257, 50362, 679, 468, 12296, 17188, 1771, 7361, 26113, 18881, 1122, 338, 670, 318, 1107, 8312, 706, 477, 290, 460] + ] + + ) + # fmt: on + + self.assertTrue(np.allclose(generated_ids, EXPECTED_LOGITS)) + + # fmt: off + EXPECTED_TRANSCRIPT = [ + " Mr. Quilter is the apostle of the middle classes, and we are glad to", + " Nor is Mr. Quilter's manner less interesting than his matter.", + " He tells us that at this festive season of the year, with Christmas and roast beef looming", + " He has grave doubts whether Sir Frederick Layton's work is really Greek after all and can", + ] + # fmt: on + + transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) + self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) + + @slow + def test_tiny_en_batched_xla_generation(self): + set_seed(0) + processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") + model = TFWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") + + input_speech = self._load_datasamples(4) + input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="tf").input_features + + xla_generate = tf.function(model.generate, jit_compile=True) + + generated_ids = model.generate(input_features) + generated_ids_xla = xla_generate(input_features) + + # fmt: off + EXPECTED_LOGITS = tf.convert_to_tensor( + [ + [50257, 50362, 1770, 13, 2264, 346, 353, 318, 262, 46329, 286, 262, 3504, 6097, 11, 290, 356, 389, 9675, 284], + [50257, 50362, 5414, 318, 1770, 13, 2264, 346, 353, 338, 5642, 1342, 3499, 621, 465, 2300, 13, 50256, 50256, 50256], + [50257, 50362, 679, 4952, 514, 326, 379, 428, 43856, 1622, 286, 262, 614, 11, 351, 6786, 290, 32595, 12023, 28236], + [50257, 50362, 679, 468, 12296, 17188, 1771, 7361, 26113, 18881, 1122, 338, 670, 318, 1107, 8312, 706, 477, 290, 460] + ] + + ) + # fmt: on + + self.assertTrue(np.allclose(generated_ids, EXPECTED_LOGITS)) + self.assertTrue(np.allclose(generated_ids_xla, EXPECTED_LOGITS)) + + # fmt: off + EXPECTED_TRANSCRIPT = [ + " Mr. Quilter is the apostle of the middle classes, and we are glad to", + " Nor is Mr. Quilter's manner less interesting than his matter.", + " He tells us that at this festive season of the year, with Christmas and roast beef looming", + " He has grave doubts whether Sir Frederick Layton's work is really Greek after all and can", + ] + # fmt: on + + transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) + transcript_xla = processor.batch_decode(generated_ids_xla, skip_special_tokens=True) + self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) + self.assertListEqual(transcript_xla, EXPECTED_TRANSCRIPT) diff --git a/tests/test_modeling_tf_common.py b/tests/test_modeling_tf_common.py index b93d8f17e4e11d..ff44a50d359bcb 100644 --- a/tests/test_modeling_tf_common.py +++ b/tests/test_modeling_tf_common.py @@ -736,6 +736,23 @@ def test_compile_tf_model(self): dtype="float32", ), } + elif model_class.__name__ in ["TFWhisperModel", "TFWhisperForConditionalGeneration"]: + inputs = { + "decoder_input_ids": tf.keras.Input( + batch_shape=(2, max_input), + name="decoder_input_ids", + dtype="int32", + ), + "input_features": tf.keras.Input( + batch_shape=( + 2, + self.model_tester.num_mel_bins, + self.model_tester.seq_length, + ), + name="input_features", + dtype="float32", + ), + } elif self.is_encoder_decoder: inputs = { "decoder_input_ids": tf.keras.Input( @@ -1223,8 +1240,17 @@ def test_save_load_after_resize_token_embeddings(self): # fetch the output for an input exclusively made of new members of the vocabulary inputs_dict = copy.deepcopy(original_inputs_dict) - new_vocab_input_ids = ids_tensor(inputs_dict["input_ids"].shape, new_tokens_size) + ids_feat_name = None + if "input_ids" in inputs_dict: + ids_feat_name = "input_ids" + elif "decoder_input_ids" in inputs_dict: + ids_feat_name = "decoder_input_ids" + else: + assert False, "No input ids feature found in the inputs dict" + + new_vocab_input_ids = ids_tensor(inputs_dict[ids_feat_name].shape, new_tokens_size) new_vocab_input_ids += old_total_size + inputs_dict[ids_feat_name] = new_vocab_input_ids if "input_ids" in inputs_dict: inputs_dict["input_ids"] = new_vocab_input_ids if "decoder_input_ids" in inputs_dict: diff --git a/utils/check_repo.py b/utils/check_repo.py index a8c6e888958244..988eb499aa302b 100644 --- a/utils/check_repo.py +++ b/utils/check_repo.py @@ -105,6 +105,8 @@ "TFElectraMainLayer", # Building part of bigger (tested) model (should it be a TFPreTrainedModel ?) "TFRobertaForMultipleChoice", # TODO: fix "TrOCRDecoderWrapper", # Building part of bigger (tested) model. + "TFWhisperEncoder", # Building part of bigger (tested) model. + "TFWhisperDecoder", # Building part of bigger (tested) model. "SeparableConv1D", # Building part of bigger (tested) model. "FlaxBartForCausalLM", # Building part of bigger (tested) model. "FlaxBertForCausalLM", # Building part of bigger (tested) model. Tested implicitly through FlaxRobertaForCausalLM. diff --git a/utils/documentation_tests.txt b/utils/documentation_tests.txt index 7f442c3fdb88dc..a39dc7a56a80ef 100644 --- a/utils/documentation_tests.txt +++ b/utils/documentation_tests.txt @@ -97,4 +97,5 @@ src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py src/transformers/models/wavlm/modeling_wavlm.py src/transformers/models/whisper/modeling_whisper.py +src/transformers/models/whisper/modeling_tf_whisper.py src/transformers/models/yolos/modeling_yolos.py From e150c4e2fec67d6cbe8458d989a139b07ea1fe05 Mon Sep 17 00:00:00 2001 From: Kaiyu Yang Date: Mon, 10 Oct 2022 06:51:11 -0700 Subject: [PATCH 519/539] Fix the error message in run_t5_mlm_flax.py (#19282) --- examples/flax/language-modeling/run_t5_mlm_flax.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/flax/language-modeling/run_t5_mlm_flax.py b/examples/flax/language-modeling/run_t5_mlm_flax.py index c9d748de3d5c09..fd988890d02cc7 100755 --- a/examples/flax/language-modeling/run_t5_mlm_flax.py +++ b/examples/flax/language-modeling/run_t5_mlm_flax.py @@ -349,7 +349,7 @@ def __call__(self, examples: List[Dict[str, np.ndarray]]) -> BatchEncoding: if batch["input_ids"].shape[-1] != self.input_length: raise ValueError( f"`input_ids` are incorrectly preprocessed. `input_ids` length is {batch['input_ids'].shape[-1]}, but" - f" should be {self.target_length}." + f" should be {self.input_length}." ) if batch["labels"].shape[-1] != self.target_length: From b0b962cccab5ae424e16abd8777b24f3e69f2208 Mon Sep 17 00:00:00 2001 From: Stefano Bosisio <75615539+Steboss89@users.noreply.github.com> Date: Mon, 10 Oct 2022 15:12:40 +0100 Subject: [PATCH 520/539] Add Italian translation for `add_new_model.mdx` (#18713) * fix conflicts * start translating * proof check * add toc * fix errors and typos --- docs/source/it/_toctree.yml | 4 + docs/source/it/add_new_model.mdx | 775 +++++++++++++++++++++++++++++++ 2 files changed, 779 insertions(+) create mode 100644 docs/source/it/add_new_model.mdx diff --git a/docs/source/it/_toctree.yml b/docs/source/it/_toctree.yml index afc618dd6faab9..0c7a6986aaf6f1 100644 --- a/docs/source/it/_toctree.yml +++ b/docs/source/it/_toctree.yml @@ -39,3 +39,7 @@ - sections: - local: add_new_pipeline title: Come aggiungere una pipeline a 🤗 Transformers? + - local: add_new_model + title: Come aggiungere un modello a 🤗 Transformers? + title: Guide How-to + diff --git a/docs/source/it/add_new_model.mdx b/docs/source/it/add_new_model.mdx new file mode 100644 index 00000000000000..464ba5830609fa --- /dev/null +++ b/docs/source/it/add_new_model.mdx @@ -0,0 +1,775 @@ + + +# Come aggiungere un modello a 🤗 Transformers? + +Aggiungere un nuovo modello é spesso difficile e richiede una profonda conoscenza della libreria 🤗 Transformers e anche +della repository originale del modello. A Hugging Face cerchiamo di dare alla community sempre piú poteri per aggiungere +modelli independentemente. Quindi, per alcuni nuovi modelli che la community vuole aggiungere a 🤗 Transformers, abbiamo +creato una specifica *call-for-model-addition* che spiega passo dopo passo come aggiungere il modello richiesto. Con +questo *call-for-model-addition* vogliamo insegnare a volenterosi e esperti collaboratori della community come implementare +un modello in 🤗 Transformers. + +Se questo é qualcosa che può interessarvi, siete liberi di controllare l'attuale “calls-for-model-addition” [qui](https://github.com/huggingface/transformers/tree/main/templates/adding_a_new_model/open_model_proposals/README.md) +e contattarci. + +Se il modello sarà selezionato, allora potrete lavorare insieme a un membro di Hugging Face per integrare il modello in 🤗 +Transformers. Così facendo, ci guadagnerai in una comprensione totale, sia teorica che pratica, del modello proposto. Inoltre, +sarai l'artefice di un importante contributo open-source a 🤗 Transformers. Durante l'implementazione avrai l'opportunità di: + +- ottenere più comprensione delle best practices in open-source +- capire i principi di design di una della librerie NLP più popolari +- capire come efficientemente testare complessi modelli NLP +- capire come integrare utilit Python come `black`, `isort`, `make fix-copies` in una libreria per garantire sempre di avere un codice leggibile e pulito + +Siamo anche contenti se vuoi aggiungere un modello che non può essere trovato nella cartella “calls-for-model-addition”. +Le seguenti sezioni spiegano in dettaglio come aggiungere un nuovo modello. Può anche essere molto utile controllare modelli +già aggiunti [qui](https://github.com/huggingface/transformers/pulls?q=is%3Apr+label%3A%22PR+for+Model+Addition%22+is%3Aclosed), +per capire se richiamano il modello che vorreste aggiungere. + +Per cominciare, vediamo una panoramica general della libreria Transformers. + +## Panoramica generale su 🤗 Transformers + +Prima di tutto, vediamo in generale 🤗 Transformers. 🤗 Transformers é una libreria molto strutturata, quindi +puà essere che a volte ci sia un disaccordo con alcune filosofie della libreria o scelte di design. Dalla nostra esperienza, +tuttavia, abbiamo trovato che le scelte fondamentali di design della libreria sono cruciali per usare 🤗 Transformers efficacemente +su larga scala, mantenendo i costi a un livello accettabile. + +Un buon primo punto di partenza per capire al meglio la libreria é leggere la [documentazione sulla nostra filosofia](filosofia) +Da qui, ci sono alcune scelte sul modo di lavorare che cerchiamo di applicare a tutti i modelli: + +- La composizione é generalmente favorita sulla sovra-astrazione +- Duplicare il codice non é sempre male, soprattutto se migliora notevolmente la leggibilità e accessibilità del modello +- Tutti i files creati per il nuovo modello devono il piu possibile "compatti". Questo vuol dire che quando qualcuno leggerá il codice +di uno specifico modello, potrá vedere solo il corrispettivo file `modeling_....py` senza avere multiple dipendenze. + + +La cosa piú importante, é che consideriamo la libreria non solo un mezzo per dare un prodotto, *per esempio* dare la possibilità +di usare BERT per inferenza, ma é anche il prodotto reale che noi vogliamo migliorare sempre più. Quindi, quando aggiungi +un modello, non sei solo la persona che userà il modello, ma rappresenti anche tutti coloro che leggeranno, +cercheranno di capire e modificare il tuo modello. + +Tenendo questi principi in mente, immergiamoci nel design generale della libreria. + +### Panoramica sui modelli + +Per aggiungere con successo un modello, é importante capire l'interazione tra il tuo modello e la sua configurazione, +[`PreTrainedModel`], e [`PretrainedConfig`]. Per dare un esempio, chiameremo il modello da aggiungere a 🤗 Transformers +`BrandNewBert`. + +Diamo un'occhiata: + + + +Come potete vedere, ci basiamo sull'ereditarietà in 🤗 Transformers, tenendo però il livello di astrazione a un minimo +assoluto. Non ci sono mai più di due livelli di astrazione per ogni modello nella libreria. `BrandNewBertModel` eredita +da `BrandNewBertPreTrainedModel` che, a sua volta, eredita da [`PreTrainedModel`] - semplice no? +Come regola generale, vogliamo essere sicuri che un nuovo modello dipenda solo da [`PreTrainedModel`]. Le funzionalità +importanti che sono automaticamente conferite a ogni nuovo modello sono [`~PreTrainedModel.from_pretrained`] +e [`~PreTrainedModel.save_pretrained`], che sono usate per serializzazione e deserializzazione. Tutte le altre importanti +funzionalità, come ad esempio `BrandNewBertModel.forward` devono essere definite completamente nel nuovo script +`modeling_brand_new_bert.py`. Inoltre, vogliamo essere sicuri che un modello con uno specifico head layer, come +`BrandNewBertForMaskedLM` non erediti da `BrandNewBertModel`, ma piuttosto usi `BrandNewBertModel` +come componente che può essere chiamata nel passaggio forward per mantenere il livello di astrazione basso. Ogni +nuovo modello richieste una classe di configurazione, chiamata `BrandNewBertConfig`. Questa configurazione é sempre +mantenuta come un attributo in [`PreTrainedModel`], e quindi può essere accessibile tramite l'attributo `config` +per tutte le classi che ereditano da `BrandNewBertPreTrainedModel`: + +```python +model = BrandNewBertModel.from_pretrained("brandy/brand_new_bert") +model.config # il modello ha accesso al suo config +``` + +Analogamente al modello, la configurazione eredita le funzionalità base di serializzazione e deserializzazione da +[`PretrainedConfig`]. É da notare che la configurazione e il modello sono sempre serializzati in due formati differenti - +il modello é serializzato in un file *pytorch_model.bin* mentre la configurazione con *config.json*. Chiamando +[`~PreTrainedModel.save_pretrained`] automaticamente chiamerà [`~PretrainedConfig.save_pretrained`], cosicché sia il +modello che la configurazione siano salvati. + + +### Stile per il codice + +Quando codifichi un nuovo modello, tieni presente che Transformers ha una sua struttura di fondo come libreria, perciò +ci sono alcuni fatti da considerare su come scrivere un codice :-) + +1. Il forward pass del tuo modello dev'essere scritto completamente nel file del modello, mentre dev'essere indipendente + da altri modelli nella libreria. Se vuoi riutilizzare un blocco di codice da un altro modello, copia e incolla il codice con un commento `# Copied from` in cima al codice (guarda [qui](https://github.com/huggingface/transformers/blob/v4.17.0/src/transformers/models/roberta/modeling_roberta.py#L160) + per un ottimo esempio). +2. Il codice dev'essere interamente comprensibile, anche da persone che non parlano in inglese. Questo significa che le + variabili devono avere un nome descrittivo e bisogna evitare abbreviazioni. Per esempio, `activation` é molto meglio + che `act`. Le variabili con una lettera sono da evitare fortemente, almeno che non sia per un indce in un for loop. +3. Generamente é meglio avere un codice esplicito e piú lungo che un codice corto e magico. +4. Evita di subclassare `nn.Sequential` in Pytorch, puoi subclassare `nn.Module` e scrivere il forward pass, cosicché + chiunque può effettuare debug sul tuo codice, aggiungendo print o breaking points. +5. La tua function-signature dev'essere type-annoted. Per il resto, é meglio preferire variabili con un nome accettabile + piuttosto che annotazioni per aumentare la comprensione e leggibilità del codice. + +### Panoramica sui tokenizers + +Questa sezione sarà creata al piu presto :-( + +## Aggiungere un modello a 🤗 Transformers passo dopo passo + +Ci sono differenti modi per aggiungere un modello a Hugging Face. Qui trovi una lista di blog posts da parte della community su come aggiungere un modello: + +1. [Aggiungere GPT2](https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28) scritto da [Thomas](https://huggingface.co/thomwolf) +2. [Aggiungere WMT19 MT](https://huggingface.co/blog/porting-fsmt) scritto da [Stas](https://huggingface.co/stas) + +Per esperienza, possiamo dirti che quando si aggiunge un modello é meglio tenere a mente le seguenti considerazioni: + +- Non sfondare una porta giá aperta! La maggior parte del codice che aggiungerai per un nuovo modello 🤗 Transformers + esiste già da qualche parte in 🤗 Transformers. Prendi un po' di tempo per trovare codici simili in modelli e tokenizers esistenti e fare un copia-incolla. Ricorda che [grep](https://www.gnu.org/software/grep/) e [rg](https://github.com/BurntSushi/ripgrep) sono tuoi buoni amici. Inoltre, ricorda che puó essere molto probabile che il tokenizer per il tuo modello sia basato sull'implementazione di un altro modello, e il codice del tuo modello stesso su un altro ancora. *Per esempio* il modello FSMT é basato su BART, mentre il tokenizer di FSMT é basato su XLM. +- Ricorda che qui é piu una sfida ingegneristica che scientifica. Spendi piú tempo per create un efficiente ambiente di debugging piuttosto che cercare di capire tutti gli aspetti teorici dell'articolo del modello. +- Chiedi aiuto se sei in panne! I modelli sono la parte principale di 🤗 Transformers, perciò qui a Hugging Face siamo più che contenti di aiutarti in ogni passo per aggiungere il tuo modello. Non esitare a chiedere se vedi che non riesci a progredire. + +Di seguito, diamo una ricetta generale per aiutare a portare un modello in 🤗 Transformers. + +La lista seguente é un sommario di tutto quello che é stato fatto per aggiungere un modello, e può essere usata come To-Do List: + +- 1. ☐ (Opzionale) Capire gli aspetti teorici del modello +- 2. ☐ Preparare l'ambiente dev per transformers +- 3. ☐ Preparare l'ambiente debugging della repository originale +- 4. ☐ Create uno script che gestisca con successo il forward pass usando la repository originale e checkpoint +- 5. ☐ Aggiungere con successo lo scheletro del modello a Transformers +- 6. ☐ Convertire i checkpoint original a Transformers checkpoint +- 7. ☐ Effettuare con successo la forward pass in Transformers, di modo che dia un output identico al checkpoint originale +- 8. ☐ Finire i tests per il modello in Transformers +- 9. ☐ Aggiungere con successo Tokenizer in Transformers +- 10. ☐ Testare e provare gli integration tests da capo a fine +- 11. ☐ Completare i docs +- 12. ☐ Caricare i moedl weights all'hub +- 13. ☐ Sottomettere una pull request +- 14. ☐ (Opzionale) Aggiungere un notebook con una demo + +Per cominciare di solito consigliamo `BrandNewBert`, partendo dalla teoria, di modo da avere una buona comprensione della teoria generale. TUttavia, se preferisci imparare l'aspetto teorico del modello mentre *lavori* sul modello é ok immergersi direttamente nel codice di `BrandNewBert`. Questa opzione puó essere buona se le tue skills ingegneristiche sono meglio che quelle teoriche, o se il paper `BrandNewBert` ti dá problemi, o se semplicemente ti piace programmare piú che leggere articoli scientifici. + +### 1. (Opzionale) Aspetti teorici di BrandNewBert + +Allora con calma, prendi un po' di tempo per leggere l'articolo su *BrandNewBert* . Sicuramente, alcune sezioni dell'articolo sono molto complesse, ma non preoccuparti! L'obiettivo non é avere una compresione immensa della teoria alla base, ma estrarre le informazioni necessarie per re-implementare con successo il modello in 🤗 Transformers. Quindi, non impazzire sugli aspetti teorici, ma piuttosto focalizzati su quelli pratici, ossia: + +- Che tipo di modello é *brand_new_bert*? É solo un encoder in stile BERT? O tipo decoder come GPT2? O encoder e decoder stile BART? Dai un'occhiata a [model_summary](model_summary) se non sei famigliare con le differenze tra questi modelli +- Quali sono le applicazioni di *brand_new_bert*? Classificazione di testo? Generazione di testo? O per tasks del genere seq2seq? +- Quali sono le nuove aggiunte al modello che lo rendono diverso da BERT/GPT-2/BART? +- Quali modelli estistenti in [🤗 Transformers models](https://huggingface.co/transformers/#contents) sono molto simili a *brand_new_bert*? +- Che tipo di tokenizer si usa in questo caso? Un sentencepiece tokenizer? O un word piece tokenizer? Il tokenizer é lo stesso di BERT o BART? + +Una volta che senti che hai avuto una bella overview dell'architettura del modello, puoi scrivere senza problemi al team di Hugging Face per ogni domanda che tu hai. Questo puó includere domande sull'architettura del modello, o sull'attention layer, etc. Saremo molto felici di aiutarti :) + + +### 2. Prepare il tuo ambiente + +1. Forka la [repository](https://github.com/huggingface/transformers) cliccando sul tasto ‘Fork' nella pagina della repository. Questo crea una copia del codice nel tuo account GitHub + +2. Clona il tuo fork `transfomers` sul tuo dico locale, e aggiungi la repository base come remota: + +```bash +git clone https://github.com/[your Github handle]/transformers.git +cd transformers +git remote add upstream https://github.com/huggingface/transformers.git +``` + + +3. Crea un ambiente di sviluppo, per esempio tramite questo comando: + +```bash +python -m venv .env +source .env/bin/activate +pip install -e ".[dev]" +``` + +quindi torna alla directory principale: + +```bash +cd .. +``` + + +4. Attenzione, raccomandiamo di aggiungere la versione di PyTorch di *brand_new_bert* a Transfomers. Per installare PyTorch, basta seguire queste istruzioni https://pytorch.org/get-started/locally/. + +**Nota bene:** Non c'é bisogno di installare o avere installato CUDA. Il nuovo modello può funzionare senza problemi su una CPU. + + +5. Per trasferire *brand_new_bert* To port *brand_new_bert* avrai bisogno anche accesso alla sua repository originale: + +```bash +git clone https://github.com/org_that_created_brand_new_bert_org/brand_new_bert.git +cd brand_new_bert +pip install -e . +``` + +Ok, ora hai un ambiente di sviluppo per portare *brand_new_bert* in 🤗 Transformers. + + +### 3.-4. Provare un pretrained checkpoint usando la repo originale + +Per cominciare, comincerai a lavorare sulla repo originale di *brand_new_bert*. Come spesso accade, l'implementazione originale é molto sullo stile "ricerca". Questo significa che a volte la documentazione non é al top, magari manca qualche cosa e il codice puó essere difficile da capire. Tuttavia, questa é e dev'essere la motivazione per reimplementare *brand_new_bert*. In Hugging Face, uno degli obiettivi principali é di *mettere le persone sulle spalle dei giganti*, il che si traduce, in questo contesto, di prendere un modello funzionante e riscriverlo e renderlo il piú possibile **accessibile, user-friendly, e leggibile**. Questa é la top motivazione per re-implementare modelli in 🤗 Transformers - cercare di creare nuove complesse tecnologie NLP accessibili a **chiunque**. + +Riuscire a far girare il modello pretrained originale dalla repository ufficiale é spesso il passo **piu arduo**. Dalla nostra esperienza, é molto importante spendere un p' di tempo per diventare familiari con il codice base originale. Come test, prova a capire i seguenti punti: + +- Dove si trovano i pretrained weights? +- Come caricare i pretrained weights nel modello corrispondente? +- Come girare un tokenizer independentemente dal modello? +- Prova a tracciare un singolo forward pass, cosicché potrai sapere che classi e funzioni sono richieste per un semplice forward pass. Di solito, dovrai reimplementare queste funzioni e basta +- Prova a localizzare i componenti importanti del modello: Dove si trova la classe del modello? Ci sono sotto classi nel modello *per esempio* EngoderModel, DecoderMOdel? Dove si trova il self-attention layer? Ci sono molteplici differenti layer di attention, *per esempio * *self-attention*, *cross-attention*...? +- Come puoi fare debug sul modello nell'ambiente originale della repo? Devi aggiungere dei *print* o puoi usare *ipdb* come debugger interattivo, o vabene anche un IDE efficiente per debug come PyCharm? + +É molto importante che prima di cominciare a trasferire il modello nuovo tu spenda tempo a fare debug del codice originale in maniera **efficiente**! Inoltre, ricorda che tutta la library é open-soruce, quindi non temere di aprire issue o fare una pull request nella repo originale. Tutti coloro che mantengono la repository saranno piú che felici di avere qualcuno che guarda e gioca con i loro codici! + +A questo punto, sta a te decidere quale ambiente per debug vuoi usare. Noi consilgiamo di evitare setup con GPU, che potrebbero costare assai, lavorare su una CPU puó essere un ottimo punto di partenza per indagare la repository originale e per cominciare a scrivere il codice per 🤗 Transformers. Solo alla fine, quando il modello é stato portato con successo in 🤗 Transformers, allora si potrá verificare il suo funzionamento su GPU. + +In generale ci sono due possibili ambienti di debug per il testare il modello originale: + +- [Jupyter notebooks](https://jupyter.org/) / [google colab](https://colab.research.google.com/notebooks/intro.ipynb) +- Scripts locali in Python + +Il vantaggio dei Jupyter notebooks é la possibilità di eseguire cella per cella, il che può essere utile per decomporre tutte le componenti logiche, cosi da a vere un ciclo di debug più rapido, siccome si possono salvare i risultati da steps intermedi. Inoltre, i notebooks spesso sono molto facili da condividere con altri contributors, il che può essere molto utile se vuoi chiedere aiuto al team di Hugging Face. Se sei famigliare con Jupyter notebooks allora racommandiamo di lavorare in questa maniera. + +Ovviamente se non siete abituati a lavorare con i notebook, questo può essere uno svantaggio nell'usare questa tecnologia, sprecando un sacco di tempo per setup e portare tutto al nuovo ambiente, siccome non potreste neanche usare dei tools di debug come `ipdb`. + +Per ogni pratica code-base, é sempre meglio come primo step caricare un **piccolo** checkpoint pretrained e cercare di riprodurre un singolo forward pass usando un vettore fittizio di IDs fatti da numeri interi. Un esempio per uno script simile, in pseudocodice é: + +```python +model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/") +input_ids = [0, 4, 5, 2, 3, 7, 9] # vector of input ids +original_output = model.predict(input_ids) +``` + +Per quanto riguarda la strategia di debugging, si può scegliere tra: + +- Decomporre il modello originario in piccole componenenti e testare ognuna di esse +- Decomporre il modello originario nel *tokenizer* originale e nel *modello* originale, testare un forward pass su questi, +e usare dei print statement o breakpoints intermedi per verificare + +Ancora una volta, siete liberi di scegliere quale strategia sia ottimale per voi. Spesso una strategia é piu +avvantaggiosa di un'altra, ma tutto dipende dall'code-base originario. + +Se il code-base vi permette di decomporre il modello in piccole sub-componenenti, *per esempio* se il code-base +originario può essere facilmente testato in eager mode, allora vale la pena effettuare un debugging di questo genere. +Ricordate che ci sono dei vantaggi nel decidere di prendere la strada piu impegnativa sin da subito: + +- negli stage piu finali, quando bisognerà comparare il modello originario all'implementazione in Hugging Face, potrete verificare +automaticamente ogni componente, individualmente, di modo che ci sia una corrispondenza 1:1 +- avrete l'opportunità di decomporre un problema molto grande in piccoli passi, così da strutturare meglio il vostro lavoro +- separare il modello in componenti logiche vi aiuterà ad avere un'ottima overview sul design del modello, quindi una migliore +comprensione del modello stesso +- verso gli stage finali i test fatti componente per componente vi aiuterà ad essere sicuri di non andare avanti e indietro +nell'implementazione, così da continuare la modifica del codice senza interruzione + +Un ottimo esempio di come questo può essere fatto é dato da [Lysandre](https://gist.github.com/LysandreJik/db4c948f6b4483960de5cbac598ad4ed) +per il modello ELECTRA + +Tuttavia, se il code-base originale é molto complesso o le componenti intermedie possono essere testate solo in tramite +compilazione, potrebbe richiedere parecchio tempo o addirittura essere impossibile separare il modello in piccole sotto-componenti. +Un buon esempio é [MeshTensorFlow di T5](https://github.com/tensorflow/mesh/tree/master/mesh_tensorflow). Questa libreria +é molto complessa e non offre un metodo semplice di decomposizione in sotto-componenti. Per simili librerie, potrete fare +affidamento ai print statements. + +In ogni caso, indipendentemente da quale strategia scegliete, la procedura raccomandata é di cominciare a fare debug dal +primo layer al layer finale. +É consigliato recuperare gli output dai layers, tramite print o sotto-componenti, nel seguente ordine: + +1. Recuperare gli IDs di input dati al modello +2. Recuperare i word embeddings +3. Recuperare l'input del primo Transformer layer +4. Recuperare l'output del primo Transformer layer +5. Recuperare l'output dei seguenti `n - 1` Transformer layers +6. Recuperare l'output dell'intero BrandNewBert Model + +Gli IDs in input dovrebbero essere un arrary di interi, *per esempio* `input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19]` + +Gli output dei seguenti layer di solito dovrebbero essere degli array di float multi-dimensionali come questo: + +``` +[[ + [-0.1465, -0.6501, 0.1993, ..., 0.1451, 0.3430, 0.6024], + [-0.4417, -0.5920, 0.3450, ..., -0.3062, 0.6182, 0.7132], + [-0.5009, -0.7122, 0.4548, ..., -0.3662, 0.6091, 0.7648], + ..., + [-0.5613, -0.6332, 0.4324, ..., -0.3792, 0.7372, 0.9288], + [-0.5416, -0.6345, 0.4180, ..., -0.3564, 0.6992, 0.9191], + [-0.5334, -0.6403, 0.4271, ..., -0.3339, 0.6533, 0.8694]]], +``` + +Ci aspettiamo che ogni modello aggiunto a 🤗 Transformers passi con successo un paio di test d'integrazione. Questo +significa che il modello originale e la sua implementazione in 🤗 Transformers abbiano lo stesso output con una precisione +di 0.001! Siccome é normale che lo stesso esatto modello, scritto in librerie diverse, possa dare output leggermente +diversi, la tolleranza accettata é 1e-3 (0.001). Ricordate che i due modelli devono dare output quasi identici. Dunque, +é molto conveniente comparare gli output intermedi di 🤗 Transformers molteplici volte con gli output intermedi del +modello originale di *brand_new_bert*. Di seguito vi diamo alcuni consigli per avere un ambiente di debug il piu efficiente +possibile: + +- Trovate la migliore strategia per fare debug dei risultati intermedi. Per esempio, é la repository originale scritta in PyTorch? +Se si, molto probabilmente dovrete dedicare un po' di tempo per scrivere degli script piu lunghi, così da decomporre il +modello originale in piccole sotto-componenti, in modo da poter recuperare i valori intermedi. Oppure, la repo originale +é scritta in Tensorflow 1? Se é così dovrete fare affidamento ai print di Tensorflow [tf.print](https://www.tensorflow.org/api_docs/python/tf/print) +per avere i valori intermedi. Altro caso, la repo é scritta in Jax? Allora assicuratevi che il modello non sia in **jit** +quanto testate il foward pass, *per esempio* controllate [questo link](https://github.com/google/jax/issues/196). +- Usate i più piccoli pretrained checkpoint che potete trovare. Piu piccolo é il checkpoint, piu velocemente sarà il vostro +ciclo di debug. Non é efficiente avere un pretrained model così gigante che per il forward pass impieghi piu di 10 secondi. +Nel caso in cui i checkpoints siano molto grandi, e non si possa trovare di meglio, allora é buona consuetudine ricorrere +a fare un dummy model nel nuovo ambiente, con weights inizializzati random e salvare quei weights per comprare la versione 🤗 Transformers +con il vostro modello +- Accertatevi di usare la via piu semplice per chiamare il forward pass nella repo originale. Sarebbe opportuno trovare +la funzione originaria che chiami **solo** un singolo forward pass, *per esempio* questa funzione spesso viene chiamata +`predict`, `evaluate`, `forward` o `__call__`. Siate sicuri di non fare debug su una funzione che chiami `forward` molteplici +volte, *per esempio* per generare testo, come `autoregressive_sample`, `generate`. +- Cercate di separare la tokenization dal forward pass del modello. Se la repo originaria mostra esempio dove potete dare +come input una stringa, provate a cercare dove nella forward call la stringa viene cambiata in input ids e cominciate il +debug da questo punto. Questo vi garantisce un ottimo punto di partenza per scrivere un piccolo script personale dove dare +gli input al modello, anziche delle stringhe in input. +- Assicuratevi che il debugging **non** sia in training mode. Spesso questo potra il modello a dare degli output random, per +via dei molteplici dropout layers. Assicuratevi che il forward pass nell'ambiente di debug sia **deterministico**, cosicche +i dropout non siano usati. Alternativamente, potete usare *transformers.utils.set_seed* se la vecchia e nuova implementazione +sono nello stesso framework. + +La seguente sezione vi da ulteriori dettagli e accorgimenti su come potete fare tutto questo per *brand_new_bert*. + + +### 5.-14. Trasferire BrandNewBert in 🤗 Transformers + +Allora cominciamo ad aggiungere un nuovo codice in 🤗 Transformers. Andate nel vostro fork clone di 🤗 Transformers: + + +```bash +cd transformers +``` + +Nel caso speciale in cui stiate aggiungendo un modello, la cui architettura sia identica a una di un modello già esistente, +dovrete solo aggiugnere uno script di conversione, come descritto [qui](#write-a-conversion-script). +In questo caso, potete riutilizzare l'intera architettura del modello gia esistente. + +Se questo non é il caso, cominciamo con il generare un nuovo modello. Avrete due opzioni: + +- `transformers-cli add-new-model-like` per aggiungere un nuovo modello come uno che gia esiste +- `transformers-cli add-new-model` per aggiungere un nuovo modello da un nostro template (questo assomigliera a BERT o Bart, in base al modello che selezionerete) + +In entrambi i casi, l'output vi darà un questionario da riempire con informazioni basi sul modello. Il secondo comando richiede di installare +un `cookiecutter` - maggiori informazioni [qui](https://github.com/huggingface/transformers/tree/main/templates/adding_a_new_model). + +**Aprire una Pull Request in main huggingface/transformers repo** + +Prime di cominciare ad adattare il codice automaticamente generato, aprite una nuova PR come "Work in progress (WIP)", +*per esempio* "[WIP] Aggiungere *brand_new_bert*", cosicché il team di Hugging Face possa lavorare al vostro fianco nell' +integrare il modello in 🤗 Transformers. + +Questi sarebbero gli step generali da seguire: + +1. Creare un branch dal main branch con un nome descrittivo + +```bash +git checkout -b add_brand_new_bert +``` + +2. Commit del codice automaticamente generato + +```bash +git add . +git commit +``` + +3. Fare fetch e rebase del main esistente + +```bash +git fetch upstream +git rebase upstream/main +``` + +4. Push dei cambiamenti al proprio account: + +```bash +git push -u origin a-descriptive-name-for-my-changes +``` + +5. Una volte che siete soddisfatti dei nuovi cambiamenti, andate sulla webpage del vostro fork su GitHub. Cliccate "Pull request". +Assiuratevi di aggiungere alcuni membri di Hugging Face come reviewers, nel riguardo alla destra della pagina della PR, cosicche il team +Hugging Face verrà notificato anche per i futuri cambiamenti. + +6. Cambiare la PR a draft, cliccando su "Convert to draft" alla destra della pagina della PR + +Da quel punto in poi, ricordate di fare commit di ogni progresso e cambiamento, cosicche venga mostrato nella PR. Inoltre, +ricordatevi di tenere aggiornato il vostro lavoro con il main esistente: + +```bash +git fetch upstream +git merge upstream/main +``` + +In generale, tutte le domande che avrete riguardo al modello o l'implementazione dovranno essere fatte nella vostra PR +e discusse/risolte nella PR stessa. In questa maniera, il team di Hugging Face sarà sempre notificato quando farete commit +di un nuovo codice o se avrete qualche domanda. É molto utile indicare al team di Hugging Face il codice a cui fate riferimento +nella domanda, cosicche il team potra facilmente capire il problema o la domanda. + +Per fare questo andate sulla tab "Files changed", dove potrete vedere tutti i vostri cambiamenti al codice, andate sulla linea +dove volete chiedere una domanda, e cliccate sul simbolo "+" per aggiungere un commento. Ogni volta che una domanda o problema +é stato risolto, cliccate sul bottone "Resolve". + +In questa stessa maniera, Hugging Face aprirà domande o commenti nel rivedere il vostro codice. Mi raccomando, chiedete più +domande possibili nella pagina della vostra PR. Se avete domande molto generali, non molto utili per il pubblico, siete liberi +di chiedere al team Hugging Face direttamente su slack o email. + + +**5. Adattare i codici per brand_new_bert** + +Per prima cosa, ci focalizzeremo sul modello e non sui tokenizer. Tutto il codice relative dovrebbe trovarsi in +`src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` e +`src/transformers/models/brand_new_bert/configuration_brand_new_bert.py`. + +Ora potete finalmente cominciare il codice :). Il codice generato in +`src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` avrà sia la stessa architettura di BERT se é un +modello encoder-only o BART se é encoder-decoder. A questo punto, ricordatevi cio che avete imparato all'inizio, riguardo +agli aspetti teorici del modello: *In che maniera il modello che sto implmementando é diverso da BERT o BART?*. Implementare +questi cambi spesso vuol dire cambiare il layer *self-attention*, l'ordine dei layer di normalizzazione e così via... +Ancora una volta ripetiamo, é molto utile vedere architetture simili di modelli gia esistenti in Transformers per avere +un'idea migliore su come implementare il modello. + +**Notate** che a questo punto non dovete avere subito un codice tutto corretto o pulito. Piuttosto, é consigliato cominciare con un +codice poco pulito, con copia-incolla del codice originale in `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` +fino a che non avrete tutto il codice necessario. In base alla nostra esperienza, é molto meglio aggiungere una prima bozza +del codice richiesto e poi correggere e migliorare iterativamente. L'unica cosa essenziale che deve funzionare qui é la seguente +instanza: + +```python +from transformers import BrandNewBertModel, BrandNewBertConfig + +model = BrandNewBertModel(BrandNewBertConfig()) +``` + +Questo comando creerà un modello con i parametri di default definiti in `BrandNewBergConfig()` e weights random. Questo garantisce +che `init()` di tutte le componenti funzioni correttamente. + + +**6. Scrivere uno script di conversione** + +Il prossimo step é scrivere uno script per convertire il checkpoint che avete usato per fare debug su *brand_new_berts* nella +repo originale in un checkpoint per la nuova implementazione di *brand_new_bert* in 🤗 Transformers. Non é consigliato scrivere +lo script di conversione da zero, ma piuttosto cercate e guardate script gia esistenti in 🤗 Transformers, così da trovarne +uno simile al vostro modello. Di solito basta fare una copia di uno script gia esistente e adattarlo al vostro caso. +Non esistate a chiedre al team di Hugging Face a riguardo. + +- Se state convertendo un modello da TensorFlow a PyTorch, un ottimo inizio é vedere [questo script di conversione per BERT](https://github.com/huggingface/transformers/blob/7acfa95afb8194f8f9c1f4d2c6028224dbed35a2/src/transformers/models/bert/modeling_bert.py#L91) +- Se state convertendo un modello da PyTorch a PyTorch, [lo script di conversione di BART può esservi utile](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py) + +Qui di seguito spiegheremo come i modelli PyTorch salvano i weights per ogni layer e come i nomi dei layer sono definiti. In PyTorch, +il nomde del layer é definito dal nome della class attribute che date al layer. Definiamo un modello dummy in PyTorch, +chiamato `SimpleModel`: + +```python +from torch import nn + + +class SimpleModel(nn.Module): + def __init__(self): + super().__init__() + self.dense = nn.Linear(10, 10) + self.intermediate = nn.Linear(10, 10) + self.layer_norm = nn.LayerNorm(10) +``` +Ora possiamo creare un'instanza di questa definizione di modo da inizializzare a random weights: `dense`, `intermediate`, `layer_norm`. +Possiamo usare print per vedere l'architettura del modello: + +```python +model = SimpleModel() + +print(model) +``` + +Da cui si ottiene: + +``` +SimpleModel( + (dense): Linear(in_features=10, out_features=10, bias=True) + (intermediate): Linear(in_features=10, out_features=10, bias=True) + (layer_norm): LayerNorm((10,), eps=1e-05, elementwise_affine=True) +) +``` + +Si può vedere come i nomi dei layers siano definiti dal nome della class attribute in PyTorch. I valori dei weights di uno +specifico layer possono essere visualizzati: + + +```python +print(model.dense.weight.data) +``` + +ad esempio: + +``` +tensor([[-0.0818, 0.2207, -0.0749, -0.0030, 0.0045, -0.1569, -0.1598, 0.0212, + -0.2077, 0.2157], + [ 0.1044, 0.0201, 0.0990, 0.2482, 0.3116, 0.2509, 0.2866, -0.2190, + 0.2166, -0.0212], + [-0.2000, 0.1107, -0.1999, -0.3119, 0.1559, 0.0993, 0.1776, -0.1950, + -0.1023, -0.0447], + [-0.0888, -0.1092, 0.2281, 0.0336, 0.1817, -0.0115, 0.2096, 0.1415, + -0.1876, -0.2467], + [ 0.2208, -0.2352, -0.1426, -0.2636, -0.2889, -0.2061, -0.2849, -0.0465, + 0.2577, 0.0402], + [ 0.1502, 0.2465, 0.2566, 0.0693, 0.2352, -0.0530, 0.1859, -0.0604, + 0.2132, 0.1680], + [ 0.1733, -0.2407, -0.1721, 0.1484, 0.0358, -0.0633, -0.0721, -0.0090, + 0.2707, -0.2509], + [-0.1173, 0.1561, 0.2945, 0.0595, -0.1996, 0.2988, -0.0802, 0.0407, + 0.1829, -0.1568], + [-0.1164, -0.2228, -0.0403, 0.0428, 0.1339, 0.0047, 0.1967, 0.2923, + 0.0333, -0.0536], + [-0.1492, -0.1616, 0.1057, 0.1950, -0.2807, -0.2710, -0.1586, 0.0739, + 0.2220, 0.2358]]). +``` + +Nello script di conversione, dovreste riempire quei valori di inizializzazione random con gli stessi weights del corrispondente +layer nel checkpoint. *Per esempio* + +```python +# retrieve matching layer weights, e.g. by +# recursive algorithm +layer_name = "dense" +pretrained_weight = array_of_dense_layer + +model_pointer = getattr(model, "dense") + +model_pointer.weight.data = torch.from_numpy(pretrained_weight) +``` + +Così facendo, dovete verificare che ogni inizializzazione random di un peso del modello PyTorch e il suo corrispondente peso nel pretrained checkpoint +siano esattamente gli stessi e uguali in **dimensione/shape e nome**. Per fare questo, é **necessario** aggiungere un `assert` +per la dimensione/shape e nome: + +```python +assert ( + model_pointer.weight.shape == pretrained_weight.shape +), f"Pointer shape of random weight {model_pointer.shape} and array shape of checkpoint weight {pretrained_weight.shape} mismatched" +``` + +Inoltre, dovrete fare il print sia dei nomi che dei weights per essere sicuri che siano gli stessi: + +```python +logger.info(f"Initialize PyTorch weight {layer_name} from {pretrained_weight.name}") +``` + +Se la dimensione o il nome non sono uguali, probabilmente avete sbagliato ad assegnare il peso nel checkpoint o nel layer costrutture di + 🤗 Transformers. + +Una dimensione sbagliata può essere dovuta ad un errore nei parameteri in `BrandNewBertConfig()`. Tuttavia, può essere anche +che l'implementazione del layer in PyTorch richieda di fare una transposizione della matrice dei weights. + +Infine, controllate **tutti** che tutti i weights inizializzati e fate print di tutti i weights del checkpoint che non sono stati +usati per l'inizializzazione, di modo da essere sicuri che il modello sia correttamente convertito. É normale che ci siano +errori nel test di conversione, fai per un errore in `BrandNewBertConfig()`, o un errore nell'architettura in 🤗 Transformers, +o un bug in `init()`. + +Questo step dev'essere fatto tramite iterazioni fino a che non si raggiungano gli stessi valori per i weights. Una volta che +il checkpoint é stato correttamente caricato in 🤗 Transformers, potete salvare il modello in una cartella di vostra scelta +`/path/to/converted/checkpoint/folder` che contenga sia +`pytorch_model.bin` che `config.json`: + +```python +model.save_pretrained("/path/to/converted/checkpoint/folder") +``` + + +**7. Implementare il forward pass** + +Una volta che i weights pretrained sono stati correttamente caricati in 🤗 Transformers, dovrete assicurarvi che il forward pass +sia correttamente implementato. [Qui](#provare-un-pretrained-checkpoint-usando-la-repo-originale), avete give creato e provato +uno script che testi il forward pass del modello usando la repo originaria. Ora dovrete fare lo stesso con uno script analogo +usando l'implementazione in 🤗 Transformers anziché l'originale. Piu o meno lo script dovrebbe essere: + +```python +model = BrandNewBertModel.from_pretrained("/path/to/converted/checkpoint/folder") +input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19] +output = model(input_ids).last_hidden_states +``` + +Di solito l'output da 🤗 Transformers non é uguale uguale all'output originario, sopratto la prima volta. Non vi abbattete - +é normale! Prima di tutto assicuratevi che non ci siano errori o che non vengano segnalati degli errori nella forward pass. +Spesso capita che ci siano dimensioni sbagliate o data type sbagliati, *ad esempio* `torch.long` anziche `torch.float32`. +Non esistate a chiedere al team Hugging Face! + +Nella parte finale assicuratevi che l'implementazione 🤗 Transformers funzioni correttamente cosi da testare che gli output +siano equivalenti a una precisione di `1e-3`. Controllate che `outputs.shape` siano le stesse tra 🤗 Transformers e l'implementazione +originaria. Poi, controllate che i valori in output siano identici. Questa é sicuramente la parte più difficile, qui una serie +di errori comuni quando gli output non sono uguali: + +- Alcuni layers non sono stati aggiunti, *ad esempio* un *activation* layer non é stato aggiunto, o ci si é scordati di una connessione +- La matrice del word embedding non é stata ripareggiata +- Ci sono degli embeddings posizionali sbagliati perché l'implementazione originaria ha un offset +- Il dropout é in azione durante il forward pass. Per sistemare questo errore controllate che *model.training = False* e che +il dropout non sia stato attivato nel forward pass, * per esempio * passate *self.training* a [PyTorch's functional dropout](https://pytorch.org/docs/stable/nn.functional.html?highlight=dropout#torch.nn.functional.dropout) + +La miglior maniera per sistemare il problema é di vedere all'implementazione originaria del forward pass e in 🤗 Transformers +fianco a fianco e vedere se ci sono delle differenze. In teoria, con debug e print degli output intermedie di entrambe le +implementazioni nel forward pass nell'esatta posizione del network dovrebbe aiutarvi a vedere dove ci sono differenze tra +i due frameworks. Come prima mossa controllate che `input_ids` siano identici in entrambi gli scripts. Da lì andate fino +all'ultimo layer. Potrete notare una differenza tra le due implementazioni a quel punto. + +Una volta che lo stesso output é stato ragguingi, verificate gli output con `torch.allclose(original_output, output, atol=1e-3)`. +A questo punto se é tutto a posto: complimenti! Le parti seguenti saranno una passeggiata 😊. + + +**8. Aggiungere i test necessari per il modello** + +A questo punto avete aggiunto con successo il vostro nuovo modello. Tuttavia, é molto probabile che il modello non sia +del tutto ok con il design richiesto. Per essere sicuri che l'implementazione sia consona e compatibile con 🤗 Transformers é +necessario implementare dei tests. Il Cookiecutter dovrebbe fornire automaticamente dei file per test per il vostro modello, +di solito nella folder `tests/test_modeling_brand_new_bert.py`. Provate questo per verificare l'ok nei test piu comuni: + +```bash +pytest tests/test_modeling_brand_new_bert.py +``` + +Una volta sistemati i test comuni, bisogna assicurarsi che il vostro lavoro sia correttamente testato cosicchè: + +- a) La community puo capire in maniera semplice il vostro lavoro controllando tests specifici del modello *brand_new_bert*, +- b) Implementazioni future del vostro modello non rompano alcune feature importante del modello. + +Per prima cosa agguingete dei test d'integrazione. Questi sono essenziali perche fanno la stessa funzione degli scripts di +debug usati precedentemente. Un template per questi tests esiste gia nel Cookiecutter ed é sotto il nome di `BrandNewBertModelIntegrationTests`, +voi dovrete solo completarlo. Una volta che questi tests sono OK, provate: + +```bash +RUN_SLOW=1 pytest -sv tests/test_modeling_brand_new_bert.py::BrandNewBertModelIntegrationTests +``` + + + +Nel caso siate su Windows, sostituite `RUN_SLOW=1` con `SET RUN_SLOW=1` + + + +Di seguito, tutte le features che sono utili e necessarire per *brand_new_bert* devono essere testate in test separati, +contenuti in `BrandNewBertModelTester`/ `BrandNewBertModelTest`. spesso la gente si scorda questi test, ma ricordate che sono utili per: + + +- Aiuta gli utenti a capire il vostro codice meglio, richiamando l'attenzione su queste nuove features +- Developers e contributors futuri potranno velocemente testare nuove implementazioni del modello testanto questi casi speciali. + + +**9. Implementare il tokenizer** + +A questo punto avremo bisogno un tokenizer per *brand_new_bert*. Di solito il tokenizer é uguale ad altri modelli in 🤗 Transformers. + +É importante che troviate il file con il tokenizer originale e che lo carichiate in 🤗 Transformers. + +Per controllare che il tokenizer funzioni in modo corretto, create uno script nella repo originaria che riceva come input +una stringa e ritorni gli `input_ids`. Piu o meno questo potrebbe essere il codice: + +```python +input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words." +model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/") +input_ids = model.tokenize(input_str) +``` + +Potrebbe richiedere un po' di tempo, ma guardate ancora alla repo originaria per trovare la funzione corretta del tokenizer. +A volte capita di dover riscrivere il tokenizer nella repo originaria, di modo da avere come output gli `input_ids`. +A quel punto uno script analogo é necessario in 🤗 Transformers: + +```python +from transformers import BrandNewBertTokenizer + +input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words." + +tokenizer = BrandNewBertTokenizer.from_pretrained("/path/to/tokenizer/folder/") + +input_ids = tokenizer(input_str).input_ids +``` + +Una volta che `input_ids` sono uguali, bisogna aggiungere un test per il tokenizer. + +Il file test per tokenizer di *brand_new_brand* dovrebbe avere un paio di hard-coded test d'integrazione. + + +**10. Test end-to-end** + +Ora che avete il tokenizer, dovrete aggiungere dei test d'integrazione per l'intero workflow in `tests/test_modeling_brand_new_bert.py` in 🤗 Transformer. +Questi test devono mostrare che un significante campione text-to-text funzioni come ci si aspetta nell'implementazione di 🤗 Transformers. +*Per esempio* potreste usare dei source-to-target-translation, o un sommario di un articolo, o un domanda-risposta e cosi via. +Se nessuno dei checkpoints é stato ultra parametrizzato per task simili, allora i tests per il modello sono piu che sufficienti. +Nello step finale dovete assicurarvi che il modello sia totalmente funzionale, e consigliamo anche di provare a testare su GPU. +Puo succedere che ci si scordi un `.to(self.device)` ad esempio. Se non avete accesso a GPU, il team Hugging Face puo provvedere +a testare questo aspetto per voi. + +**11. Aggiungere una Docstring** + +Siete quasi alla fine! L'ultima cosa rimasta é avere una bella docstring e una pagina doc. Il Cookiecutter dovrebbe provvedere già +un template chiamato `docs/source/model_doc/brand_new_bert.rst`, che dovrete compilare. La prima cosa che un utente farà +per usare il vostro modello sarà dare una bella lettura al doc. Quindi proponete una documentazione chiara e concisa. É molto +utile per la community avere anche delle *Tips* per mostrare come il modello puo' essere usato. Non esitate a chiedere a Hugging Face +riguardo alle docstirng. + +Quindi, assicuratevi che la docstring sia stata aggiunta a `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py`. +Assicuratevi che la docstring sia corretta e che includa tutti i necessari input e output. Abbiamo una guida dettagliata per +scrivere la documentazione e docstring. + + +**Rifattorizzare il codice** + +Perfetto! Ora che abbiamo tutto per *brand_new_bert* controllate che lo stile del codice sia ok: + +```bash +make style +``` + +E che il codice passi i quality check: + +```bash +make quality +``` + +A volte capita che manchino delle informazioninella docstring o alcuni nomi sbagliati, questo farà fallire i tests sopra. +Ripetiamo: chiedete pure a Hugging Face, saremo lieti di aiutarvi. + +Per ultimo, fare del refactoring del codice una volta che é stato creato. + +Avete finito con il codice, congratulazioni! 🎉 Siete fantasticiiiiiii! 😎 + +**12. Caricare il modello sul model hub** + +In questa ultima parte dovrete convertire e caricare il modello, con tutti i checkpoints, nel model hub e aggiungere una +model card per ogni checkpoint caricato. Leggete la nostra guida [Model sharing and uploading Page](model_sharing) per +avere familiarità con l'hub. Di solito in questa parte lavorate a fianco di Hugging face per decidere un nome che sia ok +per ogni checkpoint, per ottenere i permessi necessari per caricare il modello nell'organizzazione dell'autore di *brand_new_bert*. +Il metodo `push_to_hub`, presente in tutti i modelli `transformers`, é una maniera rapida e indolore per caricare il vostro checkpoint sull'hub: + +```python +brand_new_bert.push_to_hub( + repo_path_or_name="brand_new_bert", + # Uncomment the following line to push to an organization + # organization="", + commit_message="Add model", + use_temp_dir=True, +) +``` + +Vale la pena spendere un po' di tempo per creare una model card ad-hoc per ogni checkpoint. Le model cards dovrebbero +suggerire le caratteristiche specifiche del checkpoint, *per esempio* su che dataset il checkpoint é stato pretrained o fine-tuned. +O che su che genere di task il modello lavoro? E anche buona pratica includere del codice su come usare il modello correttamente. + + +**13. (Opzionale) Aggiungere un notebook** + +É molto utile aggiungere un notebook, che dimostri in dettaglio come *brand_new_bert* si utilizzi per fare inferenza e/o +fine-tuned su specifiche task. Non é una cosa obbligatoria da avere nella vostra PR, ma é molto utile per la community. + +**14. Sottomettere la PR** + +L'ultimissimo step! Ovvero il merge della PR nel main. Di solito il team Hugging face a questo punto vi avrà gia aiutato, +ma é ok prendere un po' di tempo per pulire la descirzione e commenti nel codice. + + +### Condividete il vostro lavoro!! + +É ora tempo di prendere un po' di credito dalla communità per il vostro lavoro! Caricare e implementare un nuovo modello +é un grandissimo contributo per Transformers e l'intera community NLP. Il codice e la conversione dei modelli pre-trained sara +sicuramente utilizzato da centinaia o migliaia di sviluppatori e ricercatori. Siate fieri e orgogliosi di condividere il vostro +traguardo con l'intera community :) + +** Avete create un altro modello che é super facile da usare per tutti quanti nella community! 🤯** From 4dd784c32f76fb8285f205b94e2a6ebde731a1cd Mon Sep 17 00:00:00 2001 From: amyeroberts <22614925+amyeroberts@users.noreply.github.com> Date: Mon, 10 Oct 2022 15:17:41 +0100 Subject: [PATCH 521/539] Fix momentum and epsilon values (#19454) The momentum value for PyTorch and TensorFlow batch normalization layers is not equivalent. The TensorFlow value should be (1 - pytorch_momentum) in order to ensure the correct updates are applied to the running mean and running variance calculations. We wouldn't observe a difference loading a pretrained model and performing inference, but evaluation outputs would change after some training steps. --- .../models/data2vec/modeling_tf_data2vec_vision.py | 4 ++-- src/transformers/models/groupvit/modeling_tf_groupvit.py | 4 ++-- src/transformers/models/regnet/modeling_tf_regnet.py | 4 ++-- src/transformers/models/resnet/modeling_tf_resnet.py | 4 ++-- src/transformers/models/segformer/modeling_tf_segformer.py | 2 +- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/transformers/models/data2vec/modeling_tf_data2vec_vision.py b/src/transformers/models/data2vec/modeling_tf_data2vec_vision.py index 363bbf3ff6daff..5b4de28b7ced31 100644 --- a/src/transformers/models/data2vec/modeling_tf_data2vec_vision.py +++ b/src/transformers/models/data2vec/modeling_tf_data2vec_vision.py @@ -1041,7 +1041,7 @@ def __init__( dilation_rate=dilation, name="conv", ) - self.bn = tf.keras.layers.BatchNormalization(name="bn") + self.bn = tf.keras.layers.BatchNormalization(name="bn", momentum=0.9, epsilon=1e-5) self.activation = tf.nn.relu def call(self, input: tf.Tensor) -> tf.Tensor: @@ -1331,7 +1331,7 @@ def __init__(self, config: Data2VecVisionConfig, *inputs, **kwargs) -> None: # FPNs self.fpn1 = [ tf.keras.layers.Conv2DTranspose(config.hidden_size, kernel_size=2, strides=2, name="fpn1.0"), - tf.keras.layers.BatchNormalization(name="fpn1.1"), + tf.keras.layers.BatchNormalization(name="fpn1.1", momentum=0.9, epsilon=1e-5), tf.keras.layers.Activation("gelu"), tf.keras.layers.Conv2DTranspose(config.hidden_size, kernel_size=2, strides=2, name="fpn1.3"), ] diff --git a/src/transformers/models/groupvit/modeling_tf_groupvit.py b/src/transformers/models/groupvit/modeling_tf_groupvit.py index 481f065eb6f6ac..f86d6a47e378a9 100644 --- a/src/transformers/models/groupvit/modeling_tf_groupvit.py +++ b/src/transformers/models/groupvit/modeling_tf_groupvit.py @@ -1253,13 +1253,13 @@ def __init__(self, config: GroupViTConfig, **kwargs): self.visual_projection = [ tf.keras.layers.Dense(self.projection_intermediate_dim, name="visual_projection.0"), - tf.keras.layers.BatchNormalization(name="visual_projection.1", momentum=0.1, epsilon=1e-5), + tf.keras.layers.BatchNormalization(name="visual_projection.1", momentum=0.9, epsilon=1e-5), tf.keras.layers.ReLU(name="visual_projection.2"), tf.keras.layers.Dense(self.projection_dim, name="visual_projection.3"), ] self.text_projection = [ tf.keras.layers.Dense(self.projection_intermediate_dim, name="text_projection.0"), - tf.keras.layers.BatchNormalization(name="text_projection.1", momentum=0.1, epsilon=1e-5), + tf.keras.layers.BatchNormalization(name="text_projection.1", momentum=0.9, epsilon=1e-5), tf.keras.layers.ReLU(name="text_projection.2"), tf.keras.layers.Dense(self.projection_dim, name="text_projection.3"), ] diff --git a/src/transformers/models/regnet/modeling_tf_regnet.py b/src/transformers/models/regnet/modeling_tf_regnet.py index 1d43d6eb7f8b28..8bbc37951a9933 100644 --- a/src/transformers/models/regnet/modeling_tf_regnet.py +++ b/src/transformers/models/regnet/modeling_tf_regnet.py @@ -74,7 +74,7 @@ def __init__( use_bias=False, name="convolution", ) - self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.1, name="normalization") + self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization") self.activation = ACT2FN[activation] if activation is not None else tf.identity def call(self, hidden_state): @@ -126,7 +126,7 @@ def __init__(self, out_channels: int, stride: int = 2, **kwargs): self.convolution = tf.keras.layers.Conv2D( filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution" ) - self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.1, name="normalization") + self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization") def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor: return self.normalization(self.convolution(inputs), training=training) diff --git a/src/transformers/models/resnet/modeling_tf_resnet.py b/src/transformers/models/resnet/modeling_tf_resnet.py index bed053ae404f32..4cf0d21ec777db 100644 --- a/src/transformers/models/resnet/modeling_tf_resnet.py +++ b/src/transformers/models/resnet/modeling_tf_resnet.py @@ -60,7 +60,7 @@ def __init__( out_channels, kernel_size=kernel_size, strides=stride, padding="valid", use_bias=False, name="convolution" ) # Use same default momentum and epsilon as PyTorch equivalent - self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.1, name="normalization") + self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization") self.activation = ACT2FN[activation] if activation is not None else tf.keras.layers.Activation("linear") def convolution(self, hidden_state: tf.Tensor) -> tf.Tensor: @@ -119,7 +119,7 @@ def __init__(self, out_channels: int, stride: int = 2, **kwargs) -> None: out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution" ) # Use same default momentum and epsilon as PyTorch equivalent - self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.1, name="normalization") + self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization") def call(self, x: tf.Tensor, training: bool = False) -> tf.Tensor: hidden_state = x diff --git a/src/transformers/models/segformer/modeling_tf_segformer.py b/src/transformers/models/segformer/modeling_tf_segformer.py index 2ff256d78d2b1d..c6a8e001460308 100644 --- a/src/transformers/models/segformer/modeling_tf_segformer.py +++ b/src/transformers/models/segformer/modeling_tf_segformer.py @@ -741,7 +741,7 @@ def __init__(self, config: SegformerConfig, **kwargs): self.linear_fuse = tf.keras.layers.Conv2D( filters=config.decoder_hidden_size, kernel_size=1, use_bias=False, name="linear_fuse" ) - self.batch_norm = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.1, name="batch_norm") + self.batch_norm = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="batch_norm") self.activation = tf.keras.layers.Activation("relu") self.dropout = tf.keras.layers.Dropout(config.classifier_dropout_prob) From d866b4858a4e7b51b21f9ff623d8312528e73c49 Mon Sep 17 00:00:00 2001 From: Shivang Mishra <35092323+ShivangMishra@users.noreply.github.com> Date: Mon, 10 Oct 2022 22:02:03 +0530 Subject: [PATCH 522/539] Generate: corrected exponential_decay_length_penalty type hint (#19376) --- src/transformers/generation_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/generation_utils.py b/src/transformers/generation_utils.py index 380eec07270c9c..27b392a23d56da 100644 --- a/src/transformers/generation_utils.py +++ b/src/transformers/generation_utils.py @@ -953,7 +953,7 @@ def generate( forced_eos_token_id: Optional[int] = None, remove_invalid_values: Optional[bool] = None, synced_gpus: Optional[bool] = False, - exponential_decay_length_penalty: Optional[Tuple[Union[int, float]]] = None, + exponential_decay_length_penalty: Optional[Tuple[int, float]] = None, suppress_tokens: Optional[List[int]] = None, begin_suppress_tokens: Optional[List[int]] = None, forced_decoder_ids: Optional[List[int]] = None, From 9df953a855cb6ba48dbc020472fb3f56c886c05b Mon Sep 17 00:00:00 2001 From: Bartosz Szmelczynski <43574448+Bearnardd@users.noreply.github.com> Date: Mon, 10 Oct 2022 18:33:57 +0200 Subject: [PATCH 523/539] Fix misspelled word in docstring (#19415) --- src/transformers/convert_graph_to_onnx.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/transformers/convert_graph_to_onnx.py b/src/transformers/convert_graph_to_onnx.py index 53a518fd57dd65..168f40a43ccf79 100644 --- a/src/transformers/convert_graph_to_onnx.py +++ b/src/transformers/convert_graph_to_onnx.py @@ -133,7 +133,7 @@ def check_onnxruntime_requirements(minimum_version: Version): def ensure_valid_input(model, tokens, input_names): """ - Ensure input are presented in the correct order, without any Non + Ensure inputs are presented in the correct order, without any Non Args: model: The model used to forward the input data From 25cfd911d095ac5aa66c6b9b6154db952c3310d1 Mon Sep 17 00:00:00 2001 From: Mikail Duzenli <45072645+MikailINTech@users.noreply.github.com> Date: Mon, 10 Oct 2022 18:57:28 +0200 Subject: [PATCH 524/539] Fixed a non-working hyperlink in the README.md file (#19434) * Fixed a non-working hyperlink in the README.md file The hyperlink to the community notebooks was outdated. * Fixing missing double slash in hyperlink --- notebooks/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/notebooks/README.md b/notebooks/README.md index fde9001791ac68..7df37d7e7d0919 100644 --- a/notebooks/README.md +++ b/notebooks/README.md @@ -92,4 +92,4 @@ You can open any page of the documentation as a notebook in colab (there is a bu ## Community notebooks: -More notebooks developed by the community are available [here](https:hf.co/docs/transformers/community#community-notebooks). +More notebooks developed by the community are available [here](https://hf.co/docs/transformers/community#community-notebooks). From a7bc4221c0c09857b30ac467e7de86d3f5a7c482 Mon Sep 17 00:00:00 2001 From: Yih-Dar <2521628+ydshieh@users.noreply.github.com> Date: Mon, 10 Oct 2022 19:35:23 +0100 Subject: [PATCH 525/539] fix (#19469) Co-authored-by: ydshieh --- src/transformers/models/flaubert/tokenization_flaubert.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/transformers/models/flaubert/tokenization_flaubert.py b/src/transformers/models/flaubert/tokenization_flaubert.py index 5bc2c59342772a..2475d9dde3ef28 100644 --- a/src/transformers/models/flaubert/tokenization_flaubert.py +++ b/src/transformers/models/flaubert/tokenization_flaubert.py @@ -261,7 +261,8 @@ def __init__( lang2id=lang2id, id2lang=id2lang, do_lowercase_and_remove_accent=do_lowercase_and_remove_accent, - do_lowercase=do_lowercase**kwargs, + do_lowercase=do_lowercase, + **kwargs, ) try: From 692c5be74e1de6a04c71893e0a21fab39979f9ad Mon Sep 17 00:00:00 2001 From: Partho Date: Tue, 11 Oct 2022 00:24:36 +0530 Subject: [PATCH 526/539] wrap forward passes with torch.no_grad() (#19439) --- .../visual_bert/test_modeling_visual_bert.py | 68 ++++++++++--------- 1 file changed, 36 insertions(+), 32 deletions(-) diff --git a/tests/models/visual_bert/test_modeling_visual_bert.py b/tests/models/visual_bert/test_modeling_visual_bert.py index 99db914072ccab..92ed812fe47d1e 100644 --- a/tests/models/visual_bert/test_modeling_visual_bert.py +++ b/tests/models/visual_bert/test_modeling_visual_bert.py @@ -568,14 +568,15 @@ def test_inference_vqa_coco_pre(self): attention_mask = torch.tensor([1] * 6).reshape(1, -1) visual_attention_mask = torch.tensor([1] * 10).reshape(1, -1) - output = model( - input_ids=input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - visual_embeds=visual_embeds, - visual_attention_mask=visual_attention_mask, - visual_token_type_ids=visual_token_type_ids, - ) + with torch.no_grad(): + output = model( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + visual_embeds=visual_embeds, + visual_attention_mask=visual_attention_mask, + visual_token_type_ids=visual_token_type_ids, + ) vocab_size = 30522 @@ -606,14 +607,15 @@ def test_inference_vqa(self): attention_mask = torch.tensor([1] * 6).reshape(1, -1) visual_attention_mask = torch.tensor([1] * 10).reshape(1, -1) - output = model( - input_ids=input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - visual_embeds=visual_embeds, - visual_attention_mask=visual_attention_mask, - visual_token_type_ids=visual_token_type_ids, - ) + with torch.no_grad(): + output = model( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + visual_embeds=visual_embeds, + visual_attention_mask=visual_attention_mask, + visual_token_type_ids=visual_token_type_ids, + ) # vocab_size = 30522 @@ -637,14 +639,15 @@ def test_inference_nlvr(self): attention_mask = torch.tensor([1] * 6).reshape(1, -1) visual_attention_mask = torch.tensor([1] * 10).reshape(1, -1) - output = model( - input_ids=input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - visual_embeds=visual_embeds, - visual_attention_mask=visual_attention_mask, - visual_token_type_ids=visual_token_type_ids, - ) + with torch.no_grad(): + output = model( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + visual_embeds=visual_embeds, + visual_attention_mask=visual_attention_mask, + visual_token_type_ids=visual_token_type_ids, + ) # vocab_size = 30522 @@ -667,14 +670,15 @@ def test_inference_vcr(self): visual_token_type_ids = torch.ones(size=(1, 4, 10), dtype=torch.long) visual_attention_mask = torch.ones_like(visual_token_type_ids) - output = model( - input_ids=input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - visual_embeds=visual_embeds, - visual_attention_mask=visual_attention_mask, - visual_token_type_ids=visual_token_type_ids, - ) + with torch.no_grad(): + output = model( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + visual_embeds=visual_embeds, + visual_attention_mask=visual_attention_mask, + visual_token_type_ids=visual_token_type_ids, + ) # vocab_size = 30522 From 870a9542be1d6be515bcf14ff506e3a02cf636e3 Mon Sep 17 00:00:00 2001 From: Partho Date: Tue, 11 Oct 2022 00:24:54 +0530 Subject: [PATCH 527/539] wrap forward passes with torch.no_grad() (#19438) --- tests/models/roformer/test_modeling_roformer.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/models/roformer/test_modeling_roformer.py b/tests/models/roformer/test_modeling_roformer.py index b1d7f3d8a67c3f..dadb0d8e747b6b 100644 --- a/tests/models/roformer/test_modeling_roformer.py +++ b/tests/models/roformer/test_modeling_roformer.py @@ -457,7 +457,8 @@ class RoFormerModelIntegrationTest(unittest.TestCase): def test_inference_masked_lm(self): model = RoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base") input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]]) - output = model(input_ids)[0] + with torch.no_grad(): + output = model(input_ids)[0] # TODO Replace vocab size vocab_size = 50000 From d739a707d95b5068dee39afe5887e566d13ee6e5 Mon Sep 17 00:00:00 2001 From: Partho Date: Tue, 11 Oct 2022 00:33:09 +0530 Subject: [PATCH 528/539] wrap forward passes with torch.no_grad() (#19416) --- tests/models/tapas/test_modeling_tapas.py | 37 ++++++++++++++--------- 1 file changed, 22 insertions(+), 15 deletions(-) diff --git a/tests/models/tapas/test_modeling_tapas.py b/tests/models/tapas/test_modeling_tapas.py index b7b4af6e5a2ad5..271a5efc96163b 100644 --- a/tests/models/tapas/test_modeling_tapas.py +++ b/tests/models/tapas/test_modeling_tapas.py @@ -570,7 +570,8 @@ def test_inference_no_head(self): table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="pt") inputs = {k: v.to(torch_device) for k, v in inputs.items()} - outputs = model(**inputs) + with torch.no_grad(): + outputs = model(**inputs) # test the sequence output expected_slice = torch.tensor( [ @@ -608,7 +609,8 @@ def test_inference_question_answering_head_conversational(self): table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="pt") inputs = {k: v.to(torch_device) for k, v in inputs.items()} - outputs = model(**inputs) + with torch.no_grad(): + outputs = model(**inputs) # test the logits logits = outputs.logits expected_shape = torch.Size((1, 21)) @@ -657,7 +659,8 @@ def test_inference_question_answering_head_conversational_absolute_embeddings(se table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="pt") inputs = {k: v.to(torch_device) for k, v in inputs.items()} - outputs = model(**inputs) + with torch.no_grad(): + outputs = model(**inputs) # test the logits logits = outputs.logits expected_shape = torch.Size((1, 21)) @@ -705,7 +708,8 @@ def test_inference_question_answering_head_weak_supervision(self): inputs = tokenizer(table=table, queries=queries, padding="longest", return_tensors="pt") inputs_on_device = {k: v.to(torch_device) for k, v in inputs.items()} - outputs = model(**inputs_on_device) + with torch.no_grad(): + outputs = model(**inputs_on_device) # test the logits logits = outputs.logits expected_shape = torch.Size((2, 28)) @@ -774,15 +778,16 @@ def test_training_question_answering_head_weak_supervision(self): float_answer = torch.FloatTensor(float_answer).to(torch_device) # forward pass to get loss + logits: - outputs = model( - input_ids=input_ids, - attention_mask=attention_mask, - token_type_ids=token_type_ids, - labels=labels, - numeric_values=numeric_values, - numeric_values_scale=numeric_values_scale, - float_answer=float_answer, - ) + with torch.no_grad(): + outputs = model( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + labels=labels, + numeric_values=numeric_values, + numeric_values_scale=numeric_values_scale, + float_answer=float_answer, + ) # test the loss loss = outputs.loss @@ -829,7 +834,8 @@ def test_inference_question_answering_head_strong_supervision(self): table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, return_tensors="pt") inputs = {k: v.to(torch_device) for k, v in inputs.items()} - outputs = model(**inputs) + with torch.no_grad(): + outputs = model(**inputs) # test the logits logits = outputs.logits expected_shape = torch.Size((1, 21)) @@ -884,7 +890,8 @@ def test_inference_classification_head(self): table, queries = prepare_tapas_single_inputs_for_inference() inputs = tokenizer(table=table, queries=queries, padding="longest", return_tensors="pt") inputs = {k: v.to(torch_device) for k, v in inputs.items()} - outputs = model(**inputs) + with torch.no_grad(): + outputs = model(**inputs) # test the classification logits logits = outputs.logits From c6a928cadb929d98ef7e0e6cf772994e81317a6b Mon Sep 17 00:00:00 2001 From: Partho Date: Tue, 11 Oct 2022 00:33:24 +0530 Subject: [PATCH 529/539] wrap forward passes with torch.no_grad() (#19414) --- tests/models/imagegpt/test_modeling_imagegpt.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/models/imagegpt/test_modeling_imagegpt.py b/tests/models/imagegpt/test_modeling_imagegpt.py index 528532d4cd813d..24d41348e1a4dc 100644 --- a/tests/models/imagegpt/test_modeling_imagegpt.py +++ b/tests/models/imagegpt/test_modeling_imagegpt.py @@ -538,7 +538,8 @@ def test_inference_causal_lm_head(self): inputs = feature_extractor(images=image, return_tensors="pt").to(torch_device) # forward pass - outputs = model(**inputs) + with torch.no_grad(): + outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1024, 512)) From 5f5e264a12956bd7cce47dcb422b80ed68e4c24e Mon Sep 17 00:00:00 2001 From: Partho Date: Tue, 11 Oct 2022 00:33:46 +0530 Subject: [PATCH 530/539] wrap forward passes with torch.no_grad() (#19413) --- tests/models/fnet/test_modeling_fnet.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/tests/models/fnet/test_modeling_fnet.py b/tests/models/fnet/test_modeling_fnet.py index 974d7c2d4e5d63..5d975b061f75f3 100644 --- a/tests/models/fnet/test_modeling_fnet.py +++ b/tests/models/fnet/test_modeling_fnet.py @@ -493,7 +493,8 @@ def test_inference_for_masked_lm(self): model.to(torch_device) input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]], device=torch_device) - output = model(input_ids)[0] + with torch.no_grad(): + output = model(input_ids)[0] vocab_size = 32000 @@ -536,7 +537,8 @@ def test_inference_for_next_sentence_prediction(self): model.to(torch_device) input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]], device=torch_device) - output = model(input_ids)[0] + with torch.no_grad(): + output = model(input_ids)[0] expected_shape = torch.Size((1, 2)) self.assertEqual(output.shape, expected_shape) @@ -551,7 +553,8 @@ def test_inference_model(self): model.to(torch_device) input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]], device=torch_device) - output = model(input_ids)[0] + with torch.no_grad(): + output = model(input_ids)[0] expected_shape = torch.Size((1, 6, model.config.hidden_size)) self.assertEqual(output.shape, expected_shape) From df2f28120d2c96f40a2303d9755c26036aecf45f Mon Sep 17 00:00:00 2001 From: Partho Date: Tue, 11 Oct 2022 00:34:10 +0530 Subject: [PATCH 531/539] wrap forward passes with torch.no_grad() (#19412) --- tests/models/flaubert/test_modeling_flaubert.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/models/flaubert/test_modeling_flaubert.py b/tests/models/flaubert/test_modeling_flaubert.py index da29cac6dd588c..b4150a65823575 100644 --- a/tests/models/flaubert/test_modeling_flaubert.py +++ b/tests/models/flaubert/test_modeling_flaubert.py @@ -437,7 +437,8 @@ class FlaubertModelIntegrationTest(unittest.TestCase): def test_inference_no_head_absolute_embedding(self): model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) - output = model(input_ids)[0] + with torch.no_grad(): + output = model(input_ids)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( From 10100979ed0594d4cfe1982cdfac9642a68e473e Mon Sep 17 00:00:00 2001 From: Lysandre Date: Mon, 10 Oct 2022 17:25:40 -0400 Subject: [PATCH 532/539] Dev version --- README.md | 12 ++++++------ README_ko.md | 12 ++++++------ README_zh-hans.md | 12 ++++++------ README_zh-hant.md | 12 ++++++------ examples/flax/question-answering/run_qa.py | 2 +- examples/flax/text-classification/run_flax_glue.py | 2 +- examples/flax/token-classification/run_flax_ner.py | 2 +- .../audio-classification/run_audio_classification.py | 2 +- examples/pytorch/contrastive-image-text/run_clip.py | 2 +- .../image-classification/run_image_classification.py | 2 +- .../run_image_classification_no_trainer.py | 2 +- examples/pytorch/image-pretraining/run_mae.py | 2 +- examples/pytorch/image-pretraining/run_mim.py | 2 +- examples/pytorch/language-modeling/run_clm.py | 2 +- .../pytorch/language-modeling/run_clm_no_trainer.py | 2 +- examples/pytorch/language-modeling/run_mlm.py | 2 +- .../pytorch/language-modeling/run_mlm_no_trainer.py | 2 +- examples/pytorch/language-modeling/run_plm.py | 2 +- examples/pytorch/multiple-choice/run_swag.py | 2 +- .../pytorch/multiple-choice/run_swag_no_trainer.py | 2 +- examples/pytorch/question-answering/run_qa.py | 2 +- .../pytorch/question-answering/run_qa_beam_search.py | 2 +- .../run_qa_beam_search_no_trainer.py | 2 +- .../pytorch/question-answering/run_qa_no_trainer.py | 2 +- .../pytorch/question-answering/run_seq2seq_qa.py | 2 +- .../run_semantic_segmentation.py | 2 +- .../run_semantic_segmentation_no_trainer.py | 2 +- .../speech-recognition/run_speech_recognition_ctc.py | 2 +- .../run_speech_recognition_seq2seq.py | 2 +- examples/pytorch/summarization/run_summarization.py | 2 +- .../summarization/run_summarization_no_trainer.py | 2 +- examples/pytorch/text-classification/run_glue.py | 2 +- .../text-classification/run_glue_no_trainer.py | 2 +- examples/pytorch/text-classification/run_xnli.py | 2 +- examples/pytorch/token-classification/run_ner.py | 2 +- .../token-classification/run_ner_no_trainer.py | 2 +- examples/pytorch/translation/run_translation.py | 2 +- .../translation/run_translation_no_trainer.py | 2 +- examples/tensorflow/multiple-choice/run_swag.py | 2 +- examples/tensorflow/question-answering/run_qa.py | 2 +- .../tensorflow/summarization/run_summarization.py | 2 +- examples/tensorflow/text-classification/run_glue.py | 2 +- examples/tensorflow/translation/run_translation.py | 2 +- setup.py | 2 +- src/transformers/__init__.py | 2 +- 45 files changed, 65 insertions(+), 65 deletions(-) diff --git a/README.md b/README.md index 4cdf062c188ea1..a1c6c4a77041a6 100644 --- a/README.md +++ b/README.md @@ -278,7 +278,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting. 1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever. 1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong. -1. **[Conditional DETR](https://huggingface.co/docs/transformers/main/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang. +1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang. 1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan. 1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie. 1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun. @@ -300,7 +300,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. 1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. 1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu. -1. **[ESM](https://huggingface.co/docs/transformers/main/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives. +1. **[ESM](https://huggingface.co/docs/transformers/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives. 1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab. 1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. 1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon. @@ -329,7 +329,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert. 1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin. 1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team. -1. **[MarkupLM](https://huggingface.co/docs/transformers/main/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei. +1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei. 1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov. 1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer. 1. **[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan. @@ -375,7 +375,7 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu. 1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos. 1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (from Microsoft Research) released with the paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou. -1. **[Time Series Transformer](https://huggingface.co/docs/transformers/main/model_doc/time_series_transformer)** (from HuggingFace). +1. **[Time Series Transformer](https://huggingface.co/docs/transformers/model_doc/time_series_transformer)** (from HuggingFace). 1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine 1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov. 1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei. @@ -388,12 +388,12 @@ Current number of checkpoints: ![](https://img.shields.io/endpoint?url=https://h 1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby. 1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang. 1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick. -1. **[ViTMSN](https://huggingface.co/docs/transformers/main/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas. +1. **[ViTMSN](https://huggingface.co/docs/transformers/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas. 1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli. 1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino. 1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli. 1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei. -1. **[Whisper](https://huggingface.co/docs/transformers/main/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever. +1. **[Whisper](https://huggingface.co/docs/transformers/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever. 1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling. 1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li. 1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau. diff --git a/README_ko.md b/README_ko.md index 7d72b9f6a3ac92..3453ce57b1bf32 100644 --- a/README_ko.md +++ b/README_ko.md @@ -228,7 +228,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting. 1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever. 1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong. -1. **[Conditional DETR](https://huggingface.co/docs/transformers/main/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang. +1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang. 1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan. 1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie. 1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun. @@ -250,7 +250,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. 1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. 1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu. -1. **[ESM](https://huggingface.co/docs/transformers/main/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives. +1. **[ESM](https://huggingface.co/docs/transformers/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives. 1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab. 1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. 1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon. @@ -279,7 +279,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert. 1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin. 1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team. -1. **[MarkupLM](https://huggingface.co/docs/transformers/main/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei. +1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei. 1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov. 1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer. 1. **[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan. @@ -325,7 +325,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu. 1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos. 1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (from Microsoft Research) released with the paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou. -1. **[Time Series Transformer](https://huggingface.co/docs/transformers/main/model_doc/time_series_transformer)** (from HuggingFace). +1. **[Time Series Transformer](https://huggingface.co/docs/transformers/model_doc/time_series_transformer)** (from HuggingFace). 1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine 1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov. 1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei. @@ -338,12 +338,12 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는 1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby. 1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang. 1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick. -1. **[ViTMSN](https://huggingface.co/docs/transformers/main/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas. +1. **[ViTMSN](https://huggingface.co/docs/transformers/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas. 1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli. 1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino. 1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli. 1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei. -1. **[Whisper](https://huggingface.co/docs/transformers/main/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever. +1. **[Whisper](https://huggingface.co/docs/transformers/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever. 1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling. 1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li. 1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau. diff --git a/README_zh-hans.md b/README_zh-hans.md index 474ba18d58319d..6b9d5168feace9 100644 --- a/README_zh-hans.md +++ b/README_zh-hans.md @@ -252,7 +252,7 @@ conda install -c huggingface transformers 1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (来自 Google Research) 伴随论文 [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) 由 Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting 发布。 1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (来自 OpenAI) 伴随论文 [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) 由 Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever 发布。 1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (来自 Salesforce) 伴随论文 [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) 由 Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong 发布。 -1. **[Conditional DETR](https://huggingface.co/docs/transformers/main/model_doc/conditional_detr)** (来自 Microsoft Research Asia) 伴随论文 [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) 由 Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang 发布。 +1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (来自 Microsoft Research Asia) 伴随论文 [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) 由 Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang 发布。 1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (来自 YituTech) 伴随论文 [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) 由 Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan 发布。 1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (来自 Facebook AI) 伴随论文 [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) 由 Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie 发布。 1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (来自 Tsinghua University) 伴随论文 [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) 由 Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun 发布。 @@ -274,7 +274,7 @@ conda install -c huggingface transformers 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (来自 Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning 发布。 1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (来自 Google Research) 伴随论文 [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) 由 Sascha Rothe, Shashi Narayan, Aliaksei Severyn 发布。 1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (来自 Baidu) 伴随论文 [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu 发布。 -1. **[ESM](https://huggingface.co/docs/transformers/main/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives. +1. **[ESM](https://huggingface.co/docs/transformers/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives. 1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (来自 CNRS) 伴随论文 [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) 由 Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab 发布。 1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (来自 Facebook AI) 伴随论文 [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) 由 Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela 发布。 1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (来自 Google Research) 伴随论文 [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) 由 James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon 发布。 @@ -303,7 +303,7 @@ conda install -c huggingface transformers 1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (来自 Facebook) 伴随论文 [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) 由 Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert 发布。 1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (来自 Facebook) 伴随论文 [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) 由 Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin 发布。 1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** 用 [OPUS](http://opus.nlpl.eu/) 数据训练的机器翻译模型由 Jörg Tiedemann 发布。[Marian Framework](https://marian-nmt.github.io/) 由微软翻译团队开发。 -1. **[MarkupLM](https://huggingface.co/docs/transformers/main/model_doc/markuplm)** (来自 Microsoft Research Asia) 伴随论文 [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) 由 Junlong Li, Yiheng Xu, Lei Cui, Furu Wei 发布。 +1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (来自 Microsoft Research Asia) 伴随论文 [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) 由 Junlong Li, Yiheng Xu, Lei Cui, Furu Wei 发布。 1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov >>>>>>> Fix rebase 1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (来自 Facebook) 伴随论文 [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) 由 Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer 发布。 1. **[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (来自 Facebook) 伴随论文 [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) 由 Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan 发布。 @@ -349,7 +349,7 @@ conda install -c huggingface transformers 1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (来自 Google AI) 伴随论文 [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) 由 Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu 发布。 1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (来自 Google AI) 伴随论文 [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) 由 Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos 发布。 1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (来自 Microsoft Research) 伴随论文 [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) 由 Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou 发布。 -1. **[Time Series Transformer](https://huggingface.co/docs/transformers/main/model_doc/time_series_transformer)** (from HuggingFace). +1. **[Time Series Transformer](https://huggingface.co/docs/transformers/model_doc/time_series_transformer)** (from HuggingFace). 1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine 1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (来自 Google/CMU) 伴随论文 [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) 由 Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov 发布。 1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (来自 Microsoft) 伴随论文 [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) 由 Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei 发布。 @@ -362,12 +362,12 @@ conda install -c huggingface transformers 1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (来自 Google AI) 伴随论文 [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) 由 Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby 发布。 1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (来自 UCLA NLP) 伴随论文 [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) 由 Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang 发布。 1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (来自 Meta AI) 伴随论文 [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) 由 Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick 发布。 -1. **[ViTMSN](https://huggingface.co/docs/transformers/main/model_doc/vit_msn)** (来自 Meta AI) 伴随论文 [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas 发布. +1. **[ViTMSN](https://huggingface.co/docs/transformers/model_doc/vit_msn)** (来自 Meta AI) 伴随论文 [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas 发布. 1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (来自 Facebook AI) 伴随论文 [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) 由 Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli 发布。 1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (来自 Facebook AI) 伴随论文 [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) 由 Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino 发布。 1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (来自 Facebook AI) 伴随论文 [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) 由 Qiantong Xu, Alexei Baevski, Michael Auli 发布。 1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei. -1. **[Whisper](https://huggingface.co/docs/transformers/main/model_doc/whisper)** (来自 OpenAI) 伴随论文 [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) 由 Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever 发布。 +1. **[Whisper](https://huggingface.co/docs/transformers/model_doc/whisper)** (来自 OpenAI) 伴随论文 [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) 由 Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever 发布。 1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (来自 Microsoft Research) 伴随论文 [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) 由 Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling 发布。 1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li. 1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (来自 Facebook) 伴随论文 [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) 由 Guillaume Lample and Alexis Conneau 发布。 diff --git a/README_zh-hant.md b/README_zh-hant.md index ebc75e23adb6fb..bcf13d8ea47722 100644 --- a/README_zh-hant.md +++ b/README_zh-hant.md @@ -264,7 +264,7 @@ conda install -c huggingface transformers 1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting. 1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever. 1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong. -1. **[Conditional DETR](https://huggingface.co/docs/transformers/main/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang. +1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang. 1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan. 1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie. 1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun. @@ -286,7 +286,7 @@ conda install -c huggingface transformers 1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning. 1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn. 1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu. -1. **[ESM](https://huggingface.co/docs/transformers/main/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives. +1. **[ESM](https://huggingface.co/docs/transformers/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives. 1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab. 1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela. 1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon. @@ -315,7 +315,7 @@ conda install -c huggingface transformers 1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert. 1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin. 1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team. -1. **[MarkupLM](https://huggingface.co/docs/transformers/main/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei. +1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei. 1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov 1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer. 1. **[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan. @@ -361,7 +361,7 @@ conda install -c huggingface transformers 1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released with the paper [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu. 1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos. 1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (from Microsoft Research) released with the paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou. -1. **[Time Series Transformer](https://huggingface.co/docs/transformers/main/model_doc/time_series_transformer)** (from HuggingFace). +1. **[Time Series Transformer](https://huggingface.co/docs/transformers/model_doc/time_series_transformer)** (from HuggingFace). 1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine 1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov. 1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft) released with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei. @@ -374,12 +374,12 @@ conda install -c huggingface transformers 1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby. 1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang. 1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick. -1. **[ViTMSN](https://huggingface.co/docs/transformers/main/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas. +1. **[ViTMSN](https://huggingface.co/docs/transformers/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas. 1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli. 1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino. 1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli. 1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei. -1. **[Whisper](https://huggingface.co/docs/transformers/main/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever. +1. **[Whisper](https://huggingface.co/docs/transformers/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever. 1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling. 1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li. 1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau. diff --git a/examples/flax/question-answering/run_qa.py b/examples/flax/question-answering/run_qa.py index c044ff628c8abb..27c52a1ebcf286 100644 --- a/examples/flax/question-answering/run_qa.py +++ b/examples/flax/question-answering/run_qa.py @@ -61,7 +61,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") Array = Any Dataset = datasets.arrow_dataset.Dataset diff --git a/examples/flax/text-classification/run_flax_glue.py b/examples/flax/text-classification/run_flax_glue.py index 29c2b511fcaa4f..4b9a8554d2ae9a 100755 --- a/examples/flax/text-classification/run_flax_glue.py +++ b/examples/flax/text-classification/run_flax_glue.py @@ -54,7 +54,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") Array = Any Dataset = datasets.arrow_dataset.Dataset diff --git a/examples/flax/token-classification/run_flax_ner.py b/examples/flax/token-classification/run_flax_ner.py index 929e249f446a99..77b734aba1e3ab 100644 --- a/examples/flax/token-classification/run_flax_ner.py +++ b/examples/flax/token-classification/run_flax_ner.py @@ -55,7 +55,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt") diff --git a/examples/pytorch/audio-classification/run_audio_classification.py b/examples/pytorch/audio-classification/run_audio_classification.py index e7d7f30a67dd04..9b2f3931b44778 100644 --- a/examples/pytorch/audio-classification/run_audio_classification.py +++ b/examples/pytorch/audio-classification/run_audio_classification.py @@ -45,7 +45,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt") diff --git a/examples/pytorch/contrastive-image-text/run_clip.py b/examples/pytorch/contrastive-image-text/run_clip.py index 1296d0675fa9c6..9f3f603b70285d 100644 --- a/examples/pytorch/contrastive-image-text/run_clip.py +++ b/examples/pytorch/contrastive-image-text/run_clip.py @@ -54,7 +54,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/contrastive-image-text/requirements.txt") diff --git a/examples/pytorch/image-classification/run_image_classification.py b/examples/pytorch/image-classification/run_image_classification.py index eb20c9efedf77c..13f137450d9b36 100644 --- a/examples/pytorch/image-classification/run_image_classification.py +++ b/examples/pytorch/image-classification/run_image_classification.py @@ -55,7 +55,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt") diff --git a/examples/pytorch/image-classification/run_image_classification_no_trainer.py b/examples/pytorch/image-classification/run_image_classification_no_trainer.py index 902ba962986e88..664a96c3c23ffc 100644 --- a/examples/pytorch/image-classification/run_image_classification_no_trainer.py +++ b/examples/pytorch/image-classification/run_image_classification_no_trainer.py @@ -53,7 +53,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") logger = get_logger(__name__) diff --git a/examples/pytorch/image-pretraining/run_mae.py b/examples/pytorch/image-pretraining/run_mae.py index 9e8c366d3d2728..f9419fa9b18d3b 100644 --- a/examples/pytorch/image-pretraining/run_mae.py +++ b/examples/pytorch/image-pretraining/run_mae.py @@ -43,7 +43,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt") diff --git a/examples/pytorch/image-pretraining/run_mim.py b/examples/pytorch/image-pretraining/run_mim.py index c05c1b1333f606..6181920a1e60f7 100644 --- a/examples/pytorch/image-pretraining/run_mim.py +++ b/examples/pytorch/image-pretraining/run_mim.py @@ -48,7 +48,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt") diff --git a/examples/pytorch/language-modeling/run_clm.py b/examples/pytorch/language-modeling/run_clm.py index f0a63ef732d14d..da8724fdedb5c0 100755 --- a/examples/pytorch/language-modeling/run_clm.py +++ b/examples/pytorch/language-modeling/run_clm.py @@ -54,7 +54,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") diff --git a/examples/pytorch/language-modeling/run_clm_no_trainer.py b/examples/pytorch/language-modeling/run_clm_no_trainer.py index 3e1c9048334455..d22fa4a49a7ed5 100755 --- a/examples/pytorch/language-modeling/run_clm_no_trainer.py +++ b/examples/pytorch/language-modeling/run_clm_no_trainer.py @@ -57,7 +57,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") logger = get_logger(__name__) diff --git a/examples/pytorch/language-modeling/run_mlm.py b/examples/pytorch/language-modeling/run_mlm.py index 576fc3528febae..e98d5c3cc8b79e 100755 --- a/examples/pytorch/language-modeling/run_mlm.py +++ b/examples/pytorch/language-modeling/run_mlm.py @@ -53,7 +53,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") diff --git a/examples/pytorch/language-modeling/run_mlm_no_trainer.py b/examples/pytorch/language-modeling/run_mlm_no_trainer.py index 9ce3cdd09f83bd..525bdf36890cde 100755 --- a/examples/pytorch/language-modeling/run_mlm_no_trainer.py +++ b/examples/pytorch/language-modeling/run_mlm_no_trainer.py @@ -57,7 +57,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") logger = get_logger(__name__) require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") diff --git a/examples/pytorch/language-modeling/run_plm.py b/examples/pytorch/language-modeling/run_plm.py index 12430b2a8985fc..fba8746bbcf187 100755 --- a/examples/pytorch/language-modeling/run_plm.py +++ b/examples/pytorch/language-modeling/run_plm.py @@ -47,7 +47,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") diff --git a/examples/pytorch/multiple-choice/run_swag.py b/examples/pytorch/multiple-choice/run_swag.py index c8084e25a22138..a9e63335dfd70b 100755 --- a/examples/pytorch/multiple-choice/run_swag.py +++ b/examples/pytorch/multiple-choice/run_swag.py @@ -47,7 +47,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") logger = logging.getLogger(__name__) diff --git a/examples/pytorch/multiple-choice/run_swag_no_trainer.py b/examples/pytorch/multiple-choice/run_swag_no_trainer.py index 7235d9494e30a0..a03accc68dc454 100755 --- a/examples/pytorch/multiple-choice/run_swag_no_trainer.py +++ b/examples/pytorch/multiple-choice/run_swag_no_trainer.py @@ -56,7 +56,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") logger = get_logger(__name__) # You should update this to your particular problem to have better documentation of `model_type` diff --git a/examples/pytorch/question-answering/run_qa.py b/examples/pytorch/question-answering/run_qa.py index 65940cd889fb9c..d7348f37c08363 100755 --- a/examples/pytorch/question-answering/run_qa.py +++ b/examples/pytorch/question-answering/run_qa.py @@ -49,7 +49,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt") diff --git a/examples/pytorch/question-answering/run_qa_beam_search.py b/examples/pytorch/question-answering/run_qa_beam_search.py index a0a1a6c51c4e52..61001f11208117 100755 --- a/examples/pytorch/question-answering/run_qa_beam_search.py +++ b/examples/pytorch/question-answering/run_qa_beam_search.py @@ -48,7 +48,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt") diff --git a/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py b/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py index e1a70a980d3cb9..a79d03852eabc5 100644 --- a/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py +++ b/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py @@ -56,7 +56,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt") diff --git a/examples/pytorch/question-answering/run_qa_no_trainer.py b/examples/pytorch/question-answering/run_qa_no_trainer.py index 49d5b752bfb564..5581949006b05e 100755 --- a/examples/pytorch/question-answering/run_qa_no_trainer.py +++ b/examples/pytorch/question-answering/run_qa_no_trainer.py @@ -57,7 +57,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt") diff --git a/examples/pytorch/question-answering/run_seq2seq_qa.py b/examples/pytorch/question-answering/run_seq2seq_qa.py index abcc11fbb3edb7..a889870dab5975 100644 --- a/examples/pytorch/question-answering/run_seq2seq_qa.py +++ b/examples/pytorch/question-answering/run_seq2seq_qa.py @@ -45,7 +45,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt") diff --git a/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py b/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py index bf8099135702bb..21fdc9e76e6da6 100644 --- a/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py +++ b/examples/pytorch/semantic-segmentation/run_semantic_segmentation.py @@ -51,7 +51,7 @@ logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/semantic-segmentation/requirements.txt") diff --git a/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py b/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py index 8eb18434879acb..ffbcd97aa09af8 100644 --- a/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py +++ b/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py @@ -50,7 +50,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") logger = get_logger(__name__) diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py b/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py index 904a297c5a86d4..ce04c293fb4fe1 100755 --- a/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py +++ b/examples/pytorch/speech-recognition/run_speech_recognition_ctc.py @@ -50,7 +50,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt") diff --git a/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py b/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py index e372bd833aac8d..0c9444720911a9 100755 --- a/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py +++ b/examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py @@ -48,7 +48,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt") diff --git a/examples/pytorch/summarization/run_summarization.py b/examples/pytorch/summarization/run_summarization.py index e8969d216f945a..c0da4a59cb50ea 100755 --- a/examples/pytorch/summarization/run_summarization.py +++ b/examples/pytorch/summarization/run_summarization.py @@ -52,7 +52,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt") diff --git a/examples/pytorch/summarization/run_summarization_no_trainer.py b/examples/pytorch/summarization/run_summarization_no_trainer.py index 9b8ee1a1dc876d..155f41d5093281 100644 --- a/examples/pytorch/summarization/run_summarization_no_trainer.py +++ b/examples/pytorch/summarization/run_summarization_no_trainer.py @@ -56,7 +56,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") logger = get_logger(__name__) require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt") diff --git a/examples/pytorch/text-classification/run_glue.py b/examples/pytorch/text-classification/run_glue.py index 3f97fc3f5e1b44..6e343d1af9a8e7 100755 --- a/examples/pytorch/text-classification/run_glue.py +++ b/examples/pytorch/text-classification/run_glue.py @@ -48,7 +48,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") diff --git a/examples/pytorch/text-classification/run_glue_no_trainer.py b/examples/pytorch/text-classification/run_glue_no_trainer.py index ff972a73bdf0d6..5f54560d24ae68 100644 --- a/examples/pytorch/text-classification/run_glue_no_trainer.py +++ b/examples/pytorch/text-classification/run_glue_no_trainer.py @@ -48,7 +48,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") logger = get_logger(__name__) diff --git a/examples/pytorch/text-classification/run_xnli.py b/examples/pytorch/text-classification/run_xnli.py index 2b512a2bb1b302..5878419c1e073e 100755 --- a/examples/pytorch/text-classification/run_xnli.py +++ b/examples/pytorch/text-classification/run_xnli.py @@ -48,7 +48,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") diff --git a/examples/pytorch/token-classification/run_ner.py b/examples/pytorch/token-classification/run_ner.py index 6541593c5d0e09..edbc3e3d278abf 100755 --- a/examples/pytorch/token-classification/run_ner.py +++ b/examples/pytorch/token-classification/run_ner.py @@ -49,7 +49,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt") diff --git a/examples/pytorch/token-classification/run_ner_no_trainer.py b/examples/pytorch/token-classification/run_ner_no_trainer.py index b8c3a9eae8a755..6a273428ccc36d 100755 --- a/examples/pytorch/token-classification/run_ner_no_trainer.py +++ b/examples/pytorch/token-classification/run_ner_no_trainer.py @@ -55,7 +55,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") logger = get_logger(__name__) require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt") diff --git a/examples/pytorch/translation/run_translation.py b/examples/pytorch/translation/run_translation.py index 9df352f63dd997..a2c446b58800df 100755 --- a/examples/pytorch/translation/run_translation.py +++ b/examples/pytorch/translation/run_translation.py @@ -52,7 +52,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/translation/requirements.txt") diff --git a/examples/pytorch/translation/run_translation_no_trainer.py b/examples/pytorch/translation/run_translation_no_trainer.py index 8018652eae2c18..db4e4af3ff0b94 100644 --- a/examples/pytorch/translation/run_translation_no_trainer.py +++ b/examples/pytorch/translation/run_translation_no_trainer.py @@ -57,7 +57,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") logger = get_logger(__name__) require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/translation/requirements.txt") diff --git a/examples/tensorflow/multiple-choice/run_swag.py b/examples/tensorflow/multiple-choice/run_swag.py index 34d8281ac5a03e..fa5f13b726ab9f 100644 --- a/examples/tensorflow/multiple-choice/run_swag.py +++ b/examples/tensorflow/multiple-choice/run_swag.py @@ -50,7 +50,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") logger = logging.getLogger(__name__) diff --git a/examples/tensorflow/question-answering/run_qa.py b/examples/tensorflow/question-answering/run_qa.py index 9bba2e54e915ba..ebcdb71160eb59 100755 --- a/examples/tensorflow/question-answering/run_qa.py +++ b/examples/tensorflow/question-answering/run_qa.py @@ -48,7 +48,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") logger = logging.getLogger(__name__) diff --git a/examples/tensorflow/summarization/run_summarization.py b/examples/tensorflow/summarization/run_summarization.py index 21ad8e4fa02af9..a400eb4b6328f1 100644 --- a/examples/tensorflow/summarization/run_summarization.py +++ b/examples/tensorflow/summarization/run_summarization.py @@ -53,7 +53,7 @@ # region Checking dependencies # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt") diff --git a/examples/tensorflow/text-classification/run_glue.py b/examples/tensorflow/text-classification/run_glue.py index ab91e84a59653a..a54f858b8c78ec 100644 --- a/examples/tensorflow/text-classification/run_glue.py +++ b/examples/tensorflow/text-classification/run_glue.py @@ -47,7 +47,7 @@ # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") task_to_keys = { "cola": ("sentence", None), diff --git a/examples/tensorflow/translation/run_translation.py b/examples/tensorflow/translation/run_translation.py index 6632dbe9105867..627cb856de2bdc 100644 --- a/examples/tensorflow/translation/run_translation.py +++ b/examples/tensorflow/translation/run_translation.py @@ -56,7 +56,7 @@ # region Dependencies and constants # Will error if the minimal version of Transformers is not installed. Remove at your own risks. -check_min_version("4.23.0.dev0") +check_min_version("4.24.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt") diff --git a/setup.py b/setup.py index 84c5df4793ef02..13f8b42d976080 100644 --- a/setup.py +++ b/setup.py @@ -408,7 +408,7 @@ def run(self): setup( name="transformers", - version="4.23.0.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots) + version="4.24.0.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots) author="The Hugging Face team (past and future) with the help of all our contributors (https://github.com/huggingface/transformers/graphs/contributors)", author_email="transformers@huggingface.co", description="State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow", diff --git a/src/transformers/__init__.py b/src/transformers/__init__.py index b634ffea72b119..bb3664049f1a59 100755 --- a/src/transformers/__init__.py +++ b/src/transformers/__init__.py @@ -22,7 +22,7 @@ # to defer the actual importing for when the objects are requested. This way `import transformers` provides the names # in the namespace without actually importing anything (and especially none of the backends). -__version__ = "4.23.0.dev0" +__version__ = "4.24.0.dev0" from typing import TYPE_CHECKING From d7d71c8adaeafdb9f809c9eff787b0ad974d8217 Mon Sep 17 00:00:00 2001 From: duongna21 Date: Wed, 3 Aug 2022 22:18:57 +0700 Subject: [PATCH 533/539] Compute true loss --- .../run_image_captioning_flax.py | 34 +++++++++++-------- examples/flax/language-modeling/README.md | 2 +- .../language-modeling/run_bart_dlm_flax.py | 31 ++++++++++------- .../flax/language-modeling/run_mlm_flax.py | 29 +++++++++------- .../summarization/run_summarization_flax.py | 34 +++++++++++-------- 5 files changed, 76 insertions(+), 54 deletions(-) diff --git a/examples/flax/image-captioning/run_image_captioning_flax.py b/examples/flax/image-captioning/run_image_captioning_flax.py index 5b3fd187f04179..6d338c525fdc71 100644 --- a/examples/flax/image-captioning/run_image_captioning_flax.py +++ b/examples/flax/image-captioning/run_image_captioning_flax.py @@ -926,8 +926,9 @@ def loss_fn(logits, labels, padding_mask, label_smoothing_factor=0.0): # ignore padded tokens from loss loss = loss * padding_mask - loss = loss.sum() / padding_mask.sum() - return loss + loss = loss.sum() + num_labels = padding_mask.sum() + return loss, num_labels # Define gradient update step fn def train_step(state, batch, label_smoothing_factor=0.0): @@ -936,17 +937,21 @@ def train_step(state, batch, label_smoothing_factor=0.0): def compute_loss(params): labels = batch.pop("labels") logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] - loss = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor) - return loss - - grad_fn = jax.value_and_grad(compute_loss) - loss, grad = grad_fn(state.params) - grad = jax.lax.pmean(grad, "batch") - + loss, num_labels = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor) + return loss, num_labels + + grad_fn = jax.value_and_grad(compute_loss, has_aux=True) + (loss, num_labels), grad = grad_fn(state.params) + num_labels = jax.lax.psum(num_labels, "batch") + # true loss = total loss / total samples + loss = jax.lax.psum(loss, "batch") + loss = jax.tree_map(lambda x: x / num_labels, loss) + # true grad = total grad / total samples + grad = jax.lax.psum(grad, "batch") + grad = jax.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} - metrics = jax.lax.pmean(metrics, axis_name="batch") return new_state, metrics @@ -954,11 +959,12 @@ def compute_loss(params): def eval_step(params, batch, label_smoothing_factor=0.0): labels = batch.pop("labels") logits = model(**batch, params=params, train=False)[0] - loss = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor) - + loss, num_labels = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor) + # true loss = total loss / total samples + loss = jax.lax.psum(loss, "batch") + loss = jax.tree_map(lambda x: x / num_labels, loss) # summarize metrics metrics = {"loss": loss} - metrics = jax.lax.pmean(metrics, axis_name="batch") return metrics # Define generation function @@ -1253,4 +1259,4 @@ def predict(rng: jax.random.PRNGKey, dataset: Dataset): if __name__ == "__main__": - main() + main() \ No newline at end of file diff --git a/examples/flax/language-modeling/README.md b/examples/flax/language-modeling/README.md index 5b83ed06545946..75fd64eb0856a5 100644 --- a/examples/flax/language-modeling/README.md +++ b/examples/flax/language-modeling/README.md @@ -351,7 +351,7 @@ The example script uses the 🤗 Datasets library. You can easily customize them To setup all relevant files for training, let's create a directory. ```bash -mkdir ./norwegian-roberta-base +mkdir ./norwegian-bart-base ``` ### Train tokenizer diff --git a/examples/flax/language-modeling/run_bart_dlm_flax.py b/examples/flax/language-modeling/run_bart_dlm_flax.py index 6396f4ced99695..57f736a7dd5cab 100644 --- a/examples/flax/language-modeling/run_bart_dlm_flax.py +++ b/examples/flax/language-modeling/run_bart_dlm_flax.py @@ -799,18 +799,23 @@ def loss_fn(params): loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask # take average - loss = loss.sum() / label_mask.sum() - - return loss - - grad_fn = jax.value_and_grad(loss_fn) - loss, grad = grad_fn(state.params) - grad = jax.lax.pmean(grad, "batch") + loss = loss.sum() + num_labels = label_mask.sum() + + return loss, num_labels + + grad_fn = jax.value_and_grad(loss_fn, has_aux=True) + (loss, num_labels), grad = grad_fn(state.params) + num_labels = jax.lax.psum(num_labels, "batch") + # true loss = total loss / total samples + loss = jax.lax.psum(loss, "batch") + loss = jax.tree_map(lambda x: x / num_labels, loss) + # true grad = total grad / total samples + grad = jax.lax.psum(grad, "batch") + grad = jax.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad) - metrics = jax.lax.pmean( - {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}, axis_name="batch" - ) + metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} return new_state, metrics, new_dropout_rng @@ -888,7 +893,7 @@ def eval_step(params, batch): num_eval_samples = len(tokenized_datasets["validation"]) # Avoid using jax.numpy here in case of TPU training eval_samples_idx = np.arange(num_eval_samples) - eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size, drop_last=False) + eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size) eval_metrics = [] for i, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)): @@ -928,7 +933,7 @@ def eval_step(params, batch): num_eval_samples = len(tokenized_datasets["validation"]) # Avoid using jax.numpy here in case of TPU training eval_samples_idx = np.arange(num_eval_samples) - eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size, drop_last=False) + eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size) eval_metrics = [] for _, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)): @@ -961,4 +966,4 @@ def eval_step(params, batch): if __name__ == "__main__": - main() + main() \ No newline at end of file diff --git a/examples/flax/language-modeling/run_mlm_flax.py b/examples/flax/language-modeling/run_mlm_flax.py index 5e1519bbd5f215..408a890bd32217 100755 --- a/examples/flax/language-modeling/run_mlm_flax.py +++ b/examples/flax/language-modeling/run_mlm_flax.py @@ -718,23 +718,28 @@ def loss_fn(params): logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] - # compute loss, ignore padded input tokens + # compute loss, ignore padded input tokens and special tokens label_mask = jnp.where(labels > 0, 1.0, 0.0) loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask # take average - loss = loss.sum() / label_mask.sum() - - return loss - - grad_fn = jax.value_and_grad(loss_fn) - loss, grad = grad_fn(state.params) - grad = jax.lax.pmean(grad, "batch") + loss = loss.sum() + num_labels = label_mask.sum() + + return loss, num_labels + + grad_fn = jax.value_and_grad(loss_fn, has_aux=True) + (loss, num_labels), grad = grad_fn(state.params) + num_labels = jax.lax.psum(num_labels, "batch") + # true loss = total loss / total samples + loss = jax.lax.psum(loss, "batch") + loss = jax.tree_map(lambda x: x / num_labels, loss) + # true grad = total grad / total samples + grad = jax.lax.psum(grad, "batch") + grad = jax.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad) - metrics = jax.lax.pmean( - {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}, axis_name="batch" - ) + metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} return new_state, metrics, new_dropout_rng @@ -885,4 +890,4 @@ def eval_step(params, batch): if __name__ == "__main__": - main() + main() \ No newline at end of file diff --git a/examples/flax/summarization/run_summarization_flax.py b/examples/flax/summarization/run_summarization_flax.py index ed151b8bbe031d..e1089ce18e661b 100644 --- a/examples/flax/summarization/run_summarization_flax.py +++ b/examples/flax/summarization/run_summarization_flax.py @@ -784,8 +784,9 @@ def loss_fn(logits, labels, padding_mask, label_smoothing_factor=0.0): # ignore padded tokens from loss loss = loss * padding_mask - loss = loss.sum() / padding_mask.sum() - return loss + loss = loss.sum() + num_labels = padding_mask.sum() + return loss, num_labels # Define gradient update step fn def train_step(state, batch, label_smoothing_factor=0.0): @@ -794,17 +795,21 @@ def train_step(state, batch, label_smoothing_factor=0.0): def compute_loss(params): labels = batch.pop("labels") logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] - loss = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor) - return loss - - grad_fn = jax.value_and_grad(compute_loss) - loss, grad = grad_fn(state.params) - grad = jax.lax.pmean(grad, "batch") - + loss, num_labels = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor) + return loss, num_labels + + grad_fn = jax.value_and_grad(compute_loss, has_aux=True) + (loss, num_labels), grad = grad_fn(state.params) + num_labels = jax.lax.psum(num_labels, "batch") + # true loss = total loss / total samples + loss = jax.lax.psum(loss, "batch") + loss = jax.tree_map(lambda x: x / num_labels, loss) + # true grad = total grad / total samples + grad = jax.lax.psum(grad, "batch") + grad = jax.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} - metrics = jax.lax.pmean(metrics, axis_name="batch") return new_state, metrics @@ -812,11 +817,12 @@ def compute_loss(params): def eval_step(params, batch, label_smoothing_factor=0.0): labels = batch.pop("labels") logits = model(**batch, params=params, train=False)[0] - loss = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor) - + loss, num_labels = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor) + # true loss = total loss / total samples + loss = jax.lax.psum(loss, "batch") + loss = jax.tree_map(lambda x: x / num_labels, loss) # summarize metrics metrics = {"loss": loss} - metrics = jax.lax.pmean(metrics, axis_name="batch") return metrics # Define generation function @@ -979,4 +985,4 @@ def generate_step(params, batch): if __name__ == "__main__": - main() + main() \ No newline at end of file From b5ccda08ce951c6a4ba18fbdedb0345a62fc0c6f Mon Sep 17 00:00:00 2001 From: duongna21 Date: Wed, 3 Aug 2022 22:28:59 +0700 Subject: [PATCH 534/539] final --- .../flax/image-captioning/run_image_captioning_flax.py | 8 ++++++-- examples/flax/summarization/run_summarization_flax.py | 8 ++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/examples/flax/image-captioning/run_image_captioning_flax.py b/examples/flax/image-captioning/run_image_captioning_flax.py index 6d338c525fdc71..7341c89877f543 100644 --- a/examples/flax/image-captioning/run_image_captioning_flax.py +++ b/examples/flax/image-captioning/run_image_captioning_flax.py @@ -943,27 +943,31 @@ def compute_loss(params): grad_fn = jax.value_and_grad(compute_loss, has_aux=True) (loss, num_labels), grad = grad_fn(state.params) num_labels = jax.lax.psum(num_labels, "batch") + # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") loss = jax.tree_map(lambda x: x / num_labels, loss) + # true grad = total grad / total samples grad = jax.lax.psum(grad, "batch") grad = jax.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} - return new_state, metrics # Define eval fn def eval_step(params, batch, label_smoothing_factor=0.0): labels = batch.pop("labels") logits = model(**batch, params=params, train=False)[0] + loss, num_labels = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor) + num_labels = jax.lax.psum(num_labels, "batch") + # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") loss = jax.tree_map(lambda x: x / num_labels, loss) - # summarize metrics + metrics = {"loss": loss} return metrics diff --git a/examples/flax/summarization/run_summarization_flax.py b/examples/flax/summarization/run_summarization_flax.py index e1089ce18e661b..10008448979bd6 100644 --- a/examples/flax/summarization/run_summarization_flax.py +++ b/examples/flax/summarization/run_summarization_flax.py @@ -801,27 +801,31 @@ def compute_loss(params): grad_fn = jax.value_and_grad(compute_loss, has_aux=True) (loss, num_labels), grad = grad_fn(state.params) num_labels = jax.lax.psum(num_labels, "batch") + # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") loss = jax.tree_map(lambda x: x / num_labels, loss) + # true grad = total grad / total samples grad = jax.lax.psum(grad, "batch") grad = jax.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} - return new_state, metrics # Define eval fn def eval_step(params, batch, label_smoothing_factor=0.0): labels = batch.pop("labels") logits = model(**batch, params=params, train=False)[0] + loss, num_labels = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor) + num_labels = jax.lax.psum(num_labels, "batch") + # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") loss = jax.tree_map(lambda x: x / num_labels, loss) - # summarize metrics + metrics = {"loss": loss} return metrics From 135cb98f674fad99447c68c614fdd9c7b84ee0f8 Mon Sep 17 00:00:00 2001 From: duongna21 Date: Wed, 3 Aug 2022 15:24:50 +0000 Subject: [PATCH 535/539] fixup --- .../image-captioning/run_image_captioning_flax.py | 13 +------------ .../flax/language-modeling/run_bart_dlm_flax.py | 2 +- examples/flax/language-modeling/run_mlm_flax.py | 2 +- examples/flax/language-modeling/run_t5_mlm_flax.py | 2 -- .../flax/summarization/run_summarization_flax.py | 2 +- 5 files changed, 4 insertions(+), 17 deletions(-) diff --git a/examples/flax/image-captioning/run_image_captioning_flax.py b/examples/flax/image-captioning/run_image_captioning_flax.py index 7341c89877f543..753a427ce5d423 100644 --- a/examples/flax/image-captioning/run_image_captioning_flax.py +++ b/examples/flax/image-captioning/run_image_captioning_flax.py @@ -335,7 +335,6 @@ def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuf batch_idx = np.arange(len(dataset)) for idx in range(steps): - start_idx = batch_size * idx end_idx = batch_size * (idx + 1) @@ -347,7 +346,6 @@ def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuf def write_metric(summary_writer, metrics, train_time, step, metric_key_prefix="train"): - if train_time: summary_writer.scalar("train_time", train_time, step) @@ -782,11 +780,9 @@ def blockwise_data_loader( num_splits = steps // steps_per_block + int(steps % steps_per_block > 0) for idx in range(num_splits): - if not block_size: _ds = ds else: - start_idx = block_size * idx end_idx = block_size * (idx + 1) @@ -1034,7 +1030,6 @@ def evaluation_loop( ckpt_dir: str = "", is_prediction=False, ): - logger.info(f"*** {'Predict' if is_prediction else 'Evaluate'} ***") metrics = [] @@ -1113,12 +1108,10 @@ def evaluation_loop( logger.info(desc) if jax.process_index() == 0: - if not os.path.isdir(os.path.join(training_args.output_dir, ckpt_dir)): os.makedirs(os.path.join(training_args.output_dir, ckpt_dir), exist_ok=True) if metrics: - # Save metrics (only for the evaluation/prediction being done along with training) if has_tensorboard and training_args.do_train: write_metric( @@ -1153,7 +1146,6 @@ def predict(rng: jax.random.PRNGKey, dataset: Dataset): input_rng = None if training_args.do_train: - cur_step = 0 train_time = 0 epochs = tqdm(range(num_epochs), desc=f"Epoch ... (1/{num_epochs})", position=0) @@ -1176,7 +1168,6 @@ def predict(rng: jax.random.PRNGKey, dataset: Dataset): # train for batch_idx, _ in enumerate(tqdm(range(steps_per_epoch), desc="Training...", position=1, leave=False)): - cur_step += 1 batch = next(train_batches) batch_start = time.time() @@ -1187,7 +1178,6 @@ def predict(rng: jax.random.PRNGKey, dataset: Dataset): # log and save info if training_args.logging_steps > 0 and cur_step % training_args.logging_steps == 0: - _train_metric = unreplicate(train_metric) desc = ( f"Epoch... ({epoch + 1}/{num_epochs} | Step: {cur_step} | Loss: {_train_metric['loss']} |" @@ -1227,7 +1217,6 @@ def predict(rng: jax.random.PRNGKey, dataset: Dataset): # log and save info if training_args.logging_steps <= 0: - logger.info(desc) with open(os.path.join(training_args.output_dir, "log"), "a", encoding="UTF-8") as fp: @@ -1263,4 +1252,4 @@ def predict(rng: jax.random.PRNGKey, dataset: Dataset): if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/examples/flax/language-modeling/run_bart_dlm_flax.py b/examples/flax/language-modeling/run_bart_dlm_flax.py index 57f736a7dd5cab..397f3ed8cf27a3 100644 --- a/examples/flax/language-modeling/run_bart_dlm_flax.py +++ b/examples/flax/language-modeling/run_bart_dlm_flax.py @@ -966,4 +966,4 @@ def eval_step(params, batch): if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/examples/flax/language-modeling/run_mlm_flax.py b/examples/flax/language-modeling/run_mlm_flax.py index 408a890bd32217..71ac0bb8f860e5 100755 --- a/examples/flax/language-modeling/run_mlm_flax.py +++ b/examples/flax/language-modeling/run_mlm_flax.py @@ -890,4 +890,4 @@ def eval_step(params, batch): if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/examples/flax/language-modeling/run_t5_mlm_flax.py b/examples/flax/language-modeling/run_t5_mlm_flax.py index fd988890d02cc7..ceae49c6b109d3 100755 --- a/examples/flax/language-modeling/run_t5_mlm_flax.py +++ b/examples/flax/language-modeling/run_t5_mlm_flax.py @@ -328,7 +328,6 @@ class FlaxDataCollatorForT5MLM: decoder_start_token_id: int def __call__(self, examples: List[Dict[str, np.ndarray]]) -> BatchEncoding: - # convert list to dict and tensorize input batch = BatchEncoding( {k: np.array([examples[i][k] for i in range(len(examples))]) for k, v in examples[0].items()} @@ -397,7 +396,6 @@ def filter_input_ids(self, input_ids, sentinel_ids): return input_ids def random_spans_noise_mask(self, length): - """This function is copy of `random_spans_helper `__ . Noise mask consisting of random spans of noise tokens. diff --git a/examples/flax/summarization/run_summarization_flax.py b/examples/flax/summarization/run_summarization_flax.py index 10008448979bd6..55777a4301bcc9 100644 --- a/examples/flax/summarization/run_summarization_flax.py +++ b/examples/flax/summarization/run_summarization_flax.py @@ -989,4 +989,4 @@ def generate_step(params, batch): if __name__ == "__main__": - main() \ No newline at end of file + main() From d94d04fffdbdc9551262a9ee412c53c6c2df9bcd Mon Sep 17 00:00:00 2001 From: duongna21 Date: Wed, 3 Aug 2022 22:31:00 +0700 Subject: [PATCH 536/539] final --- examples/flax/language-modeling/run_bart_dlm_flax.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/flax/language-modeling/run_bart_dlm_flax.py b/examples/flax/language-modeling/run_bart_dlm_flax.py index 397f3ed8cf27a3..bfb2090b0f7962 100644 --- a/examples/flax/language-modeling/run_bart_dlm_flax.py +++ b/examples/flax/language-modeling/run_bart_dlm_flax.py @@ -807,16 +807,17 @@ def loss_fn(params): grad_fn = jax.value_and_grad(loss_fn, has_aux=True) (loss, num_labels), grad = grad_fn(state.params) num_labels = jax.lax.psum(num_labels, "batch") + # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") loss = jax.tree_map(lambda x: x / num_labels, loss) + # true grad = total grad / total samples grad = jax.lax.psum(grad, "batch") grad = jax.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} - return new_state, metrics, new_dropout_rng # Create parallel version of the train step From d0ccf0087a983bce02a789b7ff56d0b00eb948a5 Mon Sep 17 00:00:00 2001 From: duongna21 Date: Wed, 3 Aug 2022 22:32:43 +0700 Subject: [PATCH 537/539] final --- examples/flax/language-modeling/run_mlm_flax.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/examples/flax/language-modeling/run_mlm_flax.py b/examples/flax/language-modeling/run_mlm_flax.py index 71ac0bb8f860e5..fd0f941ebceb3b 100755 --- a/examples/flax/language-modeling/run_mlm_flax.py +++ b/examples/flax/language-modeling/run_mlm_flax.py @@ -718,7 +718,7 @@ def loss_fn(params): logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] - # compute loss, ignore padded input tokens and special tokens + # compute loss, ignore padded input tokens label_mask = jnp.where(labels > 0, 1.0, 0.0) loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask @@ -731,9 +731,11 @@ def loss_fn(params): grad_fn = jax.value_and_grad(loss_fn, has_aux=True) (loss, num_labels), grad = grad_fn(state.params) num_labels = jax.lax.psum(num_labels, "batch") + # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") loss = jax.tree_map(lambda x: x / num_labels, loss) + # true grad = total grad / total samples grad = jax.lax.psum(grad, "batch") grad = jax.tree_map(lambda x: x / num_labels, grad) From 418f6c43b6be0cc708524aae6394f3d1aab06eb3 Mon Sep 17 00:00:00 2001 From: "Duong A. Nguyen" <38061659+duongna21@users.noreply.github.com> Date: Tue, 9 Aug 2022 10:20:56 +0700 Subject: [PATCH 538/539] Update examples/flax/language-modeling/run_bart_dlm_flax.py Co-authored-by: Sanchit Gandhi <93869735+sanchit-gandhi@users.noreply.github.com> --- examples/flax/language-modeling/run_bart_dlm_flax.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/flax/language-modeling/run_bart_dlm_flax.py b/examples/flax/language-modeling/run_bart_dlm_flax.py index bfb2090b0f7962..aa9b037a29b223 100644 --- a/examples/flax/language-modeling/run_bart_dlm_flax.py +++ b/examples/flax/language-modeling/run_bart_dlm_flax.py @@ -810,7 +810,7 @@ def loss_fn(params): # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") - loss = jax.tree_map(lambda x: x / num_labels, loss) + loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss) # true grad = total grad / total samples grad = jax.lax.psum(grad, "batch") From b90b5ae8831d0d9f24c948f606e0bbb8adfda124 Mon Sep 17 00:00:00 2001 From: duongna21 Date: Tue, 9 Aug 2022 13:34:02 +0700 Subject: [PATCH 539/539] jax.tree_map => jax.tree_util.tree_map --- .../image-captioning/run_image_captioning_flax.py | 6 +++--- examples/flax/language-modeling/run_bart_dlm_flax.py | 12 ++++++------ examples/flax/language-modeling/run_mlm_flax.py | 4 ++-- .../flax/summarization/run_summarization_flax.py | 6 +++--- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/examples/flax/image-captioning/run_image_captioning_flax.py b/examples/flax/image-captioning/run_image_captioning_flax.py index 753a427ce5d423..493caa6c7bfab6 100644 --- a/examples/flax/image-captioning/run_image_captioning_flax.py +++ b/examples/flax/image-captioning/run_image_captioning_flax.py @@ -942,11 +942,11 @@ def compute_loss(params): # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") - loss = jax.tree_map(lambda x: x / num_labels, loss) + loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss) # true grad = total grad / total samples grad = jax.lax.psum(grad, "batch") - grad = jax.tree_map(lambda x: x / num_labels, grad) + grad = jax.tree_util.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} @@ -962,7 +962,7 @@ def eval_step(params, batch, label_smoothing_factor=0.0): # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") - loss = jax.tree_map(lambda x: x / num_labels, loss) + loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss) metrics = {"loss": loss} return metrics diff --git a/examples/flax/language-modeling/run_bart_dlm_flax.py b/examples/flax/language-modeling/run_bart_dlm_flax.py index aa9b037a29b223..6872e59345f2b3 100644 --- a/examples/flax/language-modeling/run_bart_dlm_flax.py +++ b/examples/flax/language-modeling/run_bart_dlm_flax.py @@ -814,7 +814,7 @@ def loss_fn(params): # true grad = total grad / total samples grad = jax.lax.psum(grad, "batch") - grad = jax.tree_map(lambda x: x / num_labels, grad) + grad = jax.tree_util.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} @@ -909,9 +909,9 @@ def eval_step(params, batch): # normalize eval metrics eval_metrics = get_metrics(eval_metrics) - eval_metrics = jax.tree_map(jnp.sum, eval_metrics) + eval_metrics = jax.tree_util.tree_map(jnp.sum, eval_metrics) eval_normalizer = eval_metrics.pop("normalizer") - eval_metrics = jax.tree_map(lambda x: x / eval_normalizer, eval_metrics) + eval_metrics = jax.tree_util.tree_map(lambda x: x / eval_normalizer, eval_metrics) # Update progress bar epochs.desc = f"Step... ({cur_step} | Loss: {eval_metrics['loss']}, Acc: {eval_metrics['accuracy']})" @@ -923,7 +923,7 @@ def eval_step(params, batch): if cur_step % training_args.save_steps == 0 and cur_step > 0: # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: - params = jax.device_get(jax.tree_map(lambda x: x[0], state.params)) + params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params)) model.save_pretrained(training_args.output_dir, params=params) tokenizer.save_pretrained(training_args.output_dir) if training_args.push_to_hub: @@ -949,9 +949,9 @@ def eval_step(params, batch): # normalize eval metrics eval_metrics = get_metrics(eval_metrics) - eval_metrics = jax.tree_map(lambda metric: jnp.sum(metric).item(), eval_metrics) + eval_metrics = jax.tree_util.tree_map(lambda metric: jnp.sum(metric).item(), eval_metrics) eval_normalizer = eval_metrics.pop("normalizer") - eval_metrics = jax.tree_map(lambda x: x / eval_normalizer, eval_metrics) + eval_metrics = jax.tree_util.tree_map(lambda x: x / eval_normalizer, eval_metrics) try: perplexity = math.exp(eval_metrics["loss"]) diff --git a/examples/flax/language-modeling/run_mlm_flax.py b/examples/flax/language-modeling/run_mlm_flax.py index fd0f941ebceb3b..2383492aa497a8 100755 --- a/examples/flax/language-modeling/run_mlm_flax.py +++ b/examples/flax/language-modeling/run_mlm_flax.py @@ -734,11 +734,11 @@ def loss_fn(params): # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") - loss = jax.tree_map(lambda x: x / num_labels, loss) + loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss) # true grad = total grad / total samples grad = jax.lax.psum(grad, "batch") - grad = jax.tree_map(lambda x: x / num_labels, grad) + grad = jax.tree_util.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} diff --git a/examples/flax/summarization/run_summarization_flax.py b/examples/flax/summarization/run_summarization_flax.py index 55777a4301bcc9..fb3eb8d28c117f 100644 --- a/examples/flax/summarization/run_summarization_flax.py +++ b/examples/flax/summarization/run_summarization_flax.py @@ -804,11 +804,11 @@ def compute_loss(params): # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") - loss = jax.tree_map(lambda x: x / num_labels, loss) + loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss) # true grad = total grad / total samples grad = jax.lax.psum(grad, "batch") - grad = jax.tree_map(lambda x: x / num_labels, grad) + grad = jax.tree_util.tree_map(lambda x: x / num_labels, grad) new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} @@ -824,7 +824,7 @@ def eval_step(params, batch, label_smoothing_factor=0.0): # true loss = total loss / total samples loss = jax.lax.psum(loss, "batch") - loss = jax.tree_map(lambda x: x / num_labels, loss) + loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss) metrics = {"loss": loss} return metrics